(* Theory header. The original first line "Theory SG_Library_Complement" was a
   stray artifact (not valid Isabelle outer syntax) and has been removed; the
   section-title typo "Libary" is also fixed. *)
section ‹SG Library complements›
theory SG_Library_Complement
imports "HOL-Probability.Probability"
begin
text ‹In this file are included many statements that were useful to me, but belong rather
naturally to existing theories. In a perfect world, some of these statements would get included
into these files.
I tried to indicate to which of these classical theories the statements could be added.
›
subsection ‹Basic logic›
text ‹This one is certainly available, but I could not locate it...›
(* An equivalence follows from the forward implication together with the
   implication between the negations. *)
lemma equiv_neg:
"⟦ P ⟹ Q; ¬P ⟹ ¬Q ⟧ ⟹ (P⟷Q)"
by blast
subsection ‹Basic set theory›
(* Taking the complement twice gives back the original set. *)
lemma compl_compl_eq_id [simp]:
"UNIV - (UNIV - s) = s"
by auto
(* Symmetric difference of two sets, with infix Δ notation. *)
abbreviation sym_diff :: "'a set ⇒ 'a set ⇒ 'a set" (infixl "Δ" 70) where
"sym_diff A B ≡ ((A - B) ∪ (B-A))"
text ‹Not sure the next lemmas are useful, as they are proved solely by auto, so they
could be reproved automatically whenever necessary.›
(* Triangle-type inclusion for the symmetric difference. *)
lemma sym_diff_inc:
"A Δ C ⊆ A Δ B ∪ B Δ C"
by auto
(* Preimages commute with symmetric differences. *)
lemma sym_diff_vimage [simp]:
"f-`(A Δ B) = (f-`A) Δ (f-`B)"
by auto
subsection ‹Set-Interval.thy›
text ‹The next two lemmas belong naturally to \verb+Set_Interval.thy+, next to
\verb+UN_le_add_shift+. They are not trivially equivalent to the corresponding lemmas
with large inequalities, due to the difference when $n = 0$.›
(* Split a union over indices below n+1 into the term at 0 and the union over
   {1..<n+1}; stated with strict bounds. *)
lemma UN_le_eq_Un0_strict:
"(⋃i<n+1::nat. M i) = (⋃i∈{1..<n+1}. M i) ∪ M 0" (is "?A = ?B")
proof
show "?A ⊆ ?B"
proof
fix x assume "x ∈ ?A"
then obtain i where i: "i<n+1" "x ∈ M i" by auto
show "x ∈ ?B"
(* Case split on whether the witnessing index is 0 or a successor. *)
proof(cases i)
case 0 with i show ?thesis by simp
next
case (Suc j) with i show ?thesis by auto
qed
qed
qed (auto)
text ‹I use repeatedly this one, but I could not find it directly›
(* A countable union over all naturals equals the first set joined with the
   union over indices from 1 on. *)
lemma union_insert_0:
"(⋃n::nat. A n) = A 0 ∪ (⋃n∈{1..}. A n)"
by (metis UN_insert Un_insert_left sup_bot.left_neutral One_nat_def atLeast_0 atLeast_Suc_greaterThan ivl_disj_un_singleton(1))
text ‹Next one could be close to \verb+sum.nat_group+›
(* Summing f over {..<a*N} by grouping indices j = i*N + r according to
   their residue r. *)
lemma sum_arith_progression:
"(∑r<(N::nat). (∑i<a. f (i*N+r))) = (∑j<a*N. f j)"
proof -
(* For fixed i, the sum over r is a sum over the block {i*N..<i*N+N}. *)
have *: "(∑r<N. f (i*N+r)) = (∑ j ∈ {i*N..<i*N + N}. f j)" for i
by (rule sum.reindex_bij_betw, rule bij_betw_byWitness[where ?f' = "λr. r-i*N"], auto)
have "(∑r<N. (∑i<a. f (i*N+r))) = (∑i<a. (∑r<N. f (i*N+r)))"
using sum.swap by auto
also have "... = (∑i<a. (∑ j ∈ {i*N..<i*N + N}. f j))"
using * by auto
also have "... = (∑j<a*N. f j)"
by (rule sum.nat_group)
finally show ?thesis by simp
qed
subsection ‹Miscellaneous basic results›
(* Induction principle starting from 1 instead of 0, for properties of
   positive natural numbers. *)
lemma ind_from_1 [case_names 1 Suc, consumes 1]:
assumes "n > 0"
assumes "P 1"
and "⋀n. n > 0 ⟹ P n ⟹ P (Suc n)"
shows "P n"
proof -
(* Strengthen to a disjunction that also covers n = 0, so that ordinary
   natural induction applies. *)
have "(n = 0) ∨ P n"
proof (induction n)
case 0 then show ?case by auto
next
case (Suc k)
consider "Suc k = 1" | "Suc k > 1" by linarith
then show ?case
apply (cases) using assms Suc.IH by auto
qed
then show ?thesis using ‹n > 0› by auto
qed
text ‹This lemma is certainly available somewhere, but I couldn't
locate it›
(* Epsilon-N formulation of convergence for a real sequence. *)
lemma tends_to_real_e:
fixes u::"nat ⇒ real"
assumes "u ⇢ l" "e>0"
shows "∃N. ∀n>N. abs(u n -l) < e"
by (metis assms dist_real_def le_less lim_sequentially)
(* If a = b + c and a, b have the same residue modulo n, then c is a
   multiple of n. *)
lemma nat_mod_cong:
assumes "a = b+(c::nat)"
"a mod n = b mod n"
shows "c mod n = 0"
proof -
let ?k = "a mod n"
obtain a1 where "a = a1*n + ?k" by (metis div_mult_mod_eq)
moreover obtain b1 where "b = b1*n + ?k" using assms(2) by (metis div_mult_mod_eq)
ultimately have "a1 * n + ?k = b1 * n + ?k + c" using assms(1) by arith
then have "c = (a1 - b1) * n" by (simp add: diff_mult_distrib)
then show ?thesis by simp
qed
(* Variant of funpow_add stated directly on an argument x. *)
lemma funpow_add': "(f ^^ (m + n)) x = (f ^^ m) ((f ^^ n) x)"
by (simp add: funpow_add)
text ‹The next two lemmas are not directly equivalent, since $f$ might
not be injective.›
(* The absolute value of the maximum is bounded by the sum of absolute values. *)
lemma abs_Max_sum:
fixes A::"real set"
assumes "finite A" "A ≠ {}"
shows "abs(Max A) ≤ (∑a∈A. abs(a))"
by (simp add: assms member_le_sum)
(* Same bound for the maximum over an image f`A, summing over A. *)
lemma abs_Max_sum2:
fixes f::"_ ⇒ real"
assumes "finite A" "A ≠ {}"
shows "abs(Max (f`A)) ≤ (∑a∈A. abs(f a))"
using assms by (induct rule: finite_ne_induct, auto)
subsection ‹Conditionally-Complete-Lattices.thy›
(* A monotone map sends the Inf below the Inf of the image. *)
lemma mono_cInf:
fixes f :: "'a::conditionally_complete_lattice ⇒ 'b::conditionally_complete_lattice"
assumes "mono f" "A ≠ {}" "bdd_below A"
shows "f(Inf A) ≤ Inf (f`A)"
using assms by (simp add: cINF_greatest cInf_lower monoD)
(* For a monotone bijection the inequality becomes an equality. *)
lemma mono_bij_cInf:
fixes f :: "'a::conditionally_complete_linorder ⇒ 'b::conditionally_complete_linorder"
assumes "mono f" "bij f" "A ≠ {}" "bdd_below A"
shows "f (Inf A) = Inf (f`A)"
proof -
(* Apply mono_cInf-style reasoning to the inverse function to get the
   reverse inequality. *)
have "(inv f) (Inf (f`A)) ≤ Inf ((inv f)`(f`A))"
apply (rule cInf_greatest, auto simp add: assms(3))
using mono_inv[OF assms(1) assms(2)] assms by (simp add: mono_def bdd_below_image_mono cInf_lower)
then have "Inf (f`A) ≤ f (Inf ((inv f)`(f`A)))"
by (metis (no_types, lifting) assms(1) assms(2) mono_def bij_inv_eq_iff)
also have "... = f(Inf A)"
using assms by (simp add: bij_is_inj)
finally show ?thesis using mono_cInf[OF assms(1) assms(3) assms(4)] by auto
qed
subsection ‹Topological-spaces.thy›
(* {x. C < |x|} is open, as the preimage of an open ray under abs. *)
lemma open_less_abs [simp]:
"open {x. (C::real) < abs x}"
proof -
have *: "{x. C < abs x} = abs-`{C<..}" by auto
show ?thesis unfolding * by (auto intro!: continuous_intros)
qed
(* {x. C ≤ |x|} is closed, as the preimage of a closed ray under abs. *)
lemma closed_le_abs [simp]:
"closed {x. (C::real) ≤ abs x}"
proof -
have *: "{x. C ≤ ¦x¦} = abs-`{C..}" by auto
show ?thesis unfolding * by (auto intro!: continuous_intros)
qed
text ‹The next statements come from the same statements for true subsequences›
(* An eventually-true predicate holds eventually along u whenever
   real (u n) tends to infinity (u need not be monotone). *)
lemma eventually_weak_subseq:
fixes u::"nat ⇒ nat"
assumes "(λn. real(u n)) ⇢ ∞" "eventually P sequentially"
shows "eventually (λn. P (u n)) sequentially"
proof -
obtain N where *: "∀n≥N. P n" using assms(2) unfolding eventually_sequentially by auto
obtain M where "∀m≥M. ereal(u m) ≥ N" using assms(1) by (meson Lim_PInfty)
then have "⋀m. m ≥ M ⟹ u m ≥ N" by auto
then have "⋀m. m ≥ M ⟹ P(u m)" using ‹∀n≥N. P n› by simp
then show ?thesis unfolding eventually_sequentially by auto
qed
(* Such a u tends to at_top as a filter limit. *)
lemma filterlim_weak_subseq:
fixes u::"nat ⇒ nat"
assumes "(λn. real(u n)) ⇢ ∞"
shows "LIM n sequentially. u n:> at_top"
unfolding filterlim_iff by (metis assms eventually_weak_subseq)
(* A convergent sequence still converges along such a u. *)
lemma limit_along_weak_subseq:
fixes u::"nat ⇒ nat" and v::"nat ⇒ _"
assumes "(λn. real(u n)) ⇢ ∞" "v ⇢ l"
shows "(λ n. v(u n)) ⇢ l"
using filterlim_compose[of v, OF _ filterlim_weak_subseq] assms by auto
(* On the frontier of the sublevel set {y. infdist y S ≤ r}, the distance to S
   is exactly r. *)
lemma frontier_indist_le:
assumes "x ∈ frontier {y. infdist y S ≤ r}"
shows "infdist x S = r"
proof -
(* Work from the frontier_straddle characterization: arbitrarily close points
   both inside and outside the sublevel set. *)
have "infdist x S = r" if H: "∀e>0. (∃y. infdist y S ≤ r ∧ dist x y < e) ∧ (∃z. ¬ infdist z S ≤ r ∧ dist x z < e)"
proof -
(* Points inside give the upper bound infdist x S ≤ r. *)
have "infdist x S < r + e" if "e > 0" for e
proof -
obtain y where "infdist y S ≤ r" "dist x y < e"
using H ‹e > 0› by blast
then show ?thesis
by (metis add.commute add_mono_thms_linordered_field(3) infdist_triangle le_less_trans)
qed
then have A: "infdist x S ≤ r"
by (meson field_le_epsilon order.order_iff_strict)
(* Points outside give the lower bound r ≤ infdist x S. *)
have "r < infdist x S + e" if "e > 0" for e
proof -
obtain y where "¬(infdist y S ≤ r)" "dist x y < e"
using H ‹e > 0› by blast
then have "r < infdist y S" by auto
also have "... ≤ infdist x S + dist y x"
by (rule infdist_triangle)
finally show ?thesis using ‹dist x y < e›
by (simp add: dist_commute)
qed
then have B: "r ≤ infdist x S"
by (meson field_le_epsilon order.order_iff_strict)
show ?thesis using A B by auto
qed
then show ?thesis
using assms unfolding frontier_straddle by auto
qed
subsection ‹Limits›
text ‹The next lemmas are not very natural, but I needed them several times›
(* If f n / n converges to l, so does f (n+k) / n, for a fixed shift k. *)
lemma tendsto_shift_1_over_n [tendsto_intros]:
fixes f::"nat ⇒ real"
assumes "(λn. f n / n) ⇢ l"
shows "(λn. f (n+k) / n) ⇢ l"
proof -
(* Rewrite f(n+k)/n as a product with f(n+k)/(n+k), whose limit is known. *)
have "(1+k*(1/n))* (f(n+k)/(n+k)) = f(n+k)/n" if "n>0" for n using that by (auto simp add: divide_simps)
with eventually_mono[OF eventually_gt_at_top[of "0::nat"] this]
have "eventually (λn.(1+k*(1/n))* (f(n+k)/(n+k)) = f(n+k)/n) sequentially"
by auto
moreover have "(λn. (1+k*(1/n))* (f(n+k)/(n+k))) ⇢ (1+real k*0) * l"
by (intro tendsto_intros LIMSEQ_ignore_initial_segment assms)
ultimately show ?thesis using Lim_transform_eventually by auto
qed
(* Same statement with a backwards shift f (n-k) / n. *)
lemma tendsto_shift_1_over_n' [tendsto_intros]:
fixes f::"nat ⇒ real"
assumes "(λn. f n / n) ⇢ l"
shows "(λn. f (n-k) / n) ⇢ l"
proof -
(* First show f n / (n+k) converges to l, by the same product trick. *)
have "(1-k*(1/(n+k)))* (f n/ n) = f n/(n+k)" if "n>0" for n using that by (auto simp add: divide_simps)
with eventually_mono[OF eventually_gt_at_top[of "0::nat"] this]
have "eventually (λn. (1-k*(1/(n+k)))* (f n/ n) = f n/(n+k)) sequentially"
by auto
moreover have "(λn. (1-k*(1/(n+k)))* (f n/ n)) ⇢ (1-real k*0) * l"
by (intro tendsto_intros assms LIMSEQ_ignore_initial_segment)
ultimately have "(λn. f n / (n+k)) ⇢ l" using Lim_transform_eventually by auto
(* Then shift the index by -k and simplify n-k+k to n for n > k. *)
then have a: "(λn. f(n-k)/(n-k+k)) ⇢ l" using seq_offset_neg by auto
have "f(n-k)/(n-k+k) = f(n-k)/n" if "n>k" for n
using that by auto
with eventually_mono[OF eventually_gt_at_top[of k] this]
have "eventually (λn. f(n-k)/(n-k+k) = f(n-k)/n) sequentially"
by auto
with Lim_transform_eventually[OF a this]
show ?thesis by auto
qed
(* Make LIMSEQ_realpow_zero available to the tendsto_intros method. *)
declare LIMSEQ_realpow_zero [tendsto_intros]
subsection ‹Topology-Euclidean-Space›
text ‹A (more usable) variation around \verb+continuous_on_closure_sequentially+. The assumption
that the spaces are metric spaces is definitely too strong, but sufficient for most applications.›
(* Continuity on the closure of C gives sequential continuity along sequences
   in C converging to any limit (necessarily in the closure). *)
lemma continuous_on_closure_sequentially':
fixes f::"'a::metric_space ⇒ 'b::metric_space"
assumes "continuous_on (closure C) f"
"⋀(n::nat). u n ∈ C"
"u ⇢ l"
shows "(λn. f (u n)) ⇢ f l"
proof -
have "l ∈ closure C" unfolding closure_sequential using assms by auto
then show ?thesis
using ‹continuous_on (closure C) f› unfolding comp_def continuous_on_closure_sequentially
using assms by auto
qed
subsection ‹Convexity›
(* Midpoint form of the convexity inequality. *)
lemma convex_on_mean_ineq:
fixes f::"real ⇒ real"
assumes "convex_on A f" "x ∈ A" "y ∈ A"
shows "f ((x+y)/2) ≤ (f x + f y) / 2"
using convex_onD[OF assms(1), of "1/2" x y] using assms by (auto simp add: divide_simps)
(* A convex function on a convex set, continuous up to the closure, is convex
   on the closure. Proved by approximating boundary points by sequences in C. *)
lemma convex_on_closure:
assumes "convex (C::'a::real_normed_vector set)"
"convex_on C f"
"continuous_on (closure C) f"
shows "convex_on (closure C) f"
proof (rule convex_onI)
fix x y::'a and t::real
assume "x ∈ closure C" "y ∈ closure C" "0 < t" "t < 1"
obtain u v::"nat ⇒ 'a" where *: "⋀n. u n ∈ C" "u ⇢ x"
"⋀n. v n ∈ C" "v ⇢ y"
using ‹x ∈ closure C› ‹y ∈ closure C› unfolding closure_sequential by blast
(* w n is the convex combination of the approximating points. *)
define w where "w = (λn. (1-t) *⇩R (u n) + t *⇩R (v n))"
have "w n ∈ C" for n
using ‹0 < t› ‹t< 1› convexD[OF ‹convex C› *(1)[of n] *(3)[of n]] unfolding w_def by auto
have "w ⇢ ((1-t) *⇩R x + t *⇩R y)"
unfolding w_def using *(2) *(4) by (intro tendsto_intros)
(* Convexity inequality along the approximating sequences. *)
have *: "f(w n) ≤ (1-t) * f(u n) + t * f (v n)" for n
using *(1) *(3) ‹convex_on C f› ‹0<t› ‹t<1› less_imp_le unfolding w_def
convex_on_alt by (simp add: add.commute)
(* Pass to the limit on both sides using continuity on the closure. *)
have i: "(λn. f (w n)) ⇢ f ((1-t) *⇩R x + t *⇩R y)"
by (rule continuous_on_closure_sequentially'[OF assms(3) ‹⋀n. w n ∈ C› ‹w ⇢ ((1-t) *⇩R x + t *⇩R y)›])
have ii: "(λn. (1-t) * f(u n) + t * f (v n)) ⇢ (1-t) * f x + t * f y"
apply (intro tendsto_intros)
apply (rule continuous_on_closure_sequentially'[OF assms(3) ‹⋀n. u n ∈ C› ‹u ⇢ x›])
apply (rule continuous_on_closure_sequentially'[OF assms(3) ‹⋀n. v n ∈ C› ‹v ⇢ y›])
done
show "f ((1 - t) *⇩R x + t *⇩R y) ≤ (1 - t) * f x + t * f y"
apply (rule LIMSEQ_le[OF i ii]) using * by auto
qed
(* The norm is convex on the whole space. *)
lemma convex_on_norm [simp]:
"convex_on UNIV (λ(x::'a::real_normed_vector). norm x)"
using convex_on_dist[of UNIV "0::'a"] by auto
(* x ↦ |x| powr p is continuous for p > 0. *)
lemma continuous_abs_powr [continuous_intros]:
assumes "p > 0"
shows "continuous_on UNIV (λ(x::real). ¦x¦ powr p)"
apply (rule continuous_on_powr') using assms by (auto intro: continuous_intros)
(* For continuous f with f 0 = 0, the product sgn x * f x is continuous;
   proved by gluing the two closed half-lines. *)
lemma continuous_mult_sgn [continuous_intros]:
fixes f::"real ⇒ real"
assumes "continuous_on UNIV f" "f 0 = 0"
shows "continuous_on UNIV (λx. sgn x * f x)"
proof -
(* On {0..} the function agrees with f, on {..0} with -f (using f 0 = 0). *)
have *: "continuous_on {0..} (λx. sgn x * f x)"
apply (subst continuous_on_cong[of "{0..}" "{0..}" _ f], auto simp add: sgn_real_def assms(2))
by (rule continuous_on_subset[OF assms(1)], auto)
have **: "continuous_on {..0} (λx. sgn x * f x)"
apply (subst continuous_on_cong[of "{..0}" "{..0}" _ "λx. -f x"], auto simp add: sgn_real_def assms(2))
by (rule continuous_on_subset[of UNIV], auto simp add: assms intro!: continuous_intros)
show ?thesis
using continuous_on_closed_Un[OF _ _ * **] apply (auto intro: continuous_intros)
using continuous_on_subset by fastforce
qed
(* Derivative of x ↦ |x| powr p for p > 1, valid everywhere including 0,
   expressed via sgn. *)
lemma DERIV_abs_powr [derivative_intros]:
assumes "p > (1::real)"
shows "DERIV (λx. ¦x¦ powr p) x :> p * sgn x * ¦x¦ powr (p - 1)"
proof -
consider "x = 0" | "x>0" | "x < 0" by linarith
then show ?thesis
proof (cases)
(* At 0: the difference quotient |h| powr p / h tends to 0, by continuity
   of sgn h * |h| powr (p-1) at 0 (it vanishes there since p > 1). *)
case 1
have "continuous_on UNIV (λx. sgn x * ¦x¦ powr (p - 1))"
by (auto simp add: assms intro!:continuous_intros)
then have "(λh. sgn h * ¦h¦ powr (p-1)) ─0→ (λh. sgn h * ¦h¦ powr (p-1)) 0"
using continuous_on_def by blast
moreover have "¦h¦ powr p / h = sgn h * ¦h¦ powr (p-1)" for h
proof -
have "¦h¦ powr p / h = sgn h * ¦h¦ powr p / ¦h¦"
by (auto simp add: algebra_simps divide_simps sgn_real_def)
also have "... = sgn h * ¦h¦ powr (p-1)"
using assms apply (cases "h = 0") apply (auto)
by (metis abs_ge_zero powr_diff [symmetric] powr_one_gt_zero_iff times_divide_eq_right)
finally show ?thesis by simp
qed
ultimately have "(λh. ¦h¦ powr p / h) ─0→ 0" by auto
then show ?thesis unfolding DERIV_def by (auto simp add: ‹x = 0›)
next
(* For x > 0: |y| powr p agrees with y powr p near x. *)
case 2
have *: "∀⇩F y in nhds x. ¦y¦ powr p = y powr p"
unfolding eventually_nhds apply (rule exI[of _ "{0<..}"]) using ‹x > 0› by auto
show ?thesis
apply (subst DERIV_cong_ev[of _ x _ "(λx. x powr p)" _ "p * x powr (p-1)"])
using ‹x > 0› by (auto simp add: * has_real_derivative_powr)
next
(* For x < 0: |y| powr p agrees with (-y) powr p near x; use the chain rule. *)
case 3
have *: "∀⇩F y in nhds x. ¦y¦ powr p = (-y) powr p"
unfolding eventually_nhds apply (rule exI[of _ "{..<0}"]) using ‹x < 0› by auto
show ?thesis
apply (subst DERIV_cong_ev[of _ x _ "(λx. (-x) powr p)" _ "p * (- x) powr (p - real 1) * - 1"])
using ‹x < 0› apply (simp, simp add: *, simp)
apply (rule DERIV_fun_powr[of "λy. -y" "-1" "x" p]) using ‹x < 0› by (auto simp add: derivative_intros)
qed
qed
(* Convexity of x ↦ |x| powr p for p ≥ 1, via monotonicity of the derivative
   computed in DERIV_abs_powr (case p > 1); p = 1 reduces to the norm. *)
lemma convex_abs_powr:
assumes "p ≥ 1"
shows "convex_on UNIV (λx::real. ¦x¦ powr p)"
proof (cases "p = 1")
case True
have "convex_on UNIV (λx::real. norm x)"
by (rule convex_on_norm)
moreover have "¦x¦ powr p = norm x" for x using True by auto
ultimately show ?thesis by simp
next
case False
then have "p > 1" using assms by auto
(* g is the derivative of |x| powr p; showing g is monotone yields convexity. *)
define g where "g = (λx::real. p * sgn x * ¦x¦ powr (p - 1))"
have *: "DERIV (λx. ¦x¦ powr p) x :> g x" for x
unfolding g_def using ‹p>1› by (intro derivative_intros)
have **: "g x ≤ g y" if "x ≤ y" for x y
proof -
consider "x ≥ 0 ∧ y ≥ 0" | "x ≤ 0 ∧ y ≤ 0" | "x < 0 ∧ y > 0" using ‹x ≤ y› by linarith
then show ?thesis
proof (cases)
case 1
then show ?thesis unfolding g_def sgn_real_def using ‹p>1› ‹x ≤ y› by (auto simp add: powr_mono2)
next
case 2
then show ?thesis unfolding g_def sgn_real_def using ‹p>1› ‹x ≤ y› by (auto simp add: powr_mono2)
next
case 3
then have "g x ≤ 0" "0 ≤ g y" unfolding g_def using ‹p > 1› by auto
then show ?thesis by simp
qed
qed
show ?thesis
apply (rule convex_on_realI[of _ _ g]) using * ** by auto
qed
(* Convexity of x ↦ x powr p on the nonnegative half-line, for p ≥ 1. *)
lemma convex_powr:
assumes "p ≥ 1"
shows "convex_on {0..} (λx::real. x powr p)"
proof -
have "convex_on {0..} (λx::real. ¦x¦ powr p)"
using convex_abs_powr[OF ‹p ≥ 1›] convex_on_subset by auto
moreover have "¦x¦ powr p = x powr p" if "x ∈ {0..}" for x using that by auto
ultimately show ?thesis by (simp add: convex_on_def)
qed
(* For 0 < p ≤ 1, the map x ↦ -(x powr p) is convex on {0..}; proved first on
   the open half-line via the derivative, then extended to the closure. *)
lemma convex_powr':
assumes "p > 0" "p ≤ 1"
shows "convex_on {0..} (λx::real. - (x powr p))"
proof -
have "convex_on {0<..} (λx::real. - (x powr p))"
apply (rule convex_on_realI[of _ _ "λx. -p * x powr (p-1)"])
apply (auto intro!:derivative_intros simp add: has_real_derivative_powr)
using ‹p > 0› ‹p ≤ 1› by (auto simp add: algebra_simps divide_simps powr_mono2')
moreover have "continuous_on {0..} (λx::real. - (x powr p))"
by (rule continuous_on_minus, rule continuous_on_powr', auto simp add: ‹p > 0› intro!: continuous_intros)
moreover have "{(0::real)..} = closure {0<..}" "convex {(0::real)<..}" by auto
ultimately show ?thesis using convex_on_closure by metis
qed
(* Superadditivity of a convex function vanishing at 0, on nonnegative arguments:
   f x + f y ≤ f (x+y). *)
lemma convex_fx_plus_fy_ineq:
fixes f::"real ⇒ real"
assumes "convex_on {0..} f"
"x ≥ 0" "y ≥ 0" "f 0 = 0"
shows "f x + f y ≤ f (x+y)"
proof -
(* Compare slopes through 0 using convex_on_diff; wlog a ≤ b. *)
have *: "f a + f b ≤ f (a+b)" if "a ≥ 0" "b ≥ a" for a b
proof (cases "a = 0")
case False
then have "a > 0" "b > 0" using ‹b ≥ a› ‹a ≥ 0› by auto
have "(f 0 - f a) / (0 - a) ≤ (f 0 - f (a+b))/ (0 - (a+b))"
apply (rule convex_on_diff[OF ‹convex_on {0..} f›]) using ‹a > 0› ‹b > 0› by auto
also have "... ≤ (f b - f (a+b)) / (b - (a+b))"
apply (rule convex_on_diff[OF ‹convex_on {0..} f›]) using ‹a > 0› ‹b > 0› by auto
finally show ?thesis
using ‹a > 0› ‹b > 0› ‹f 0 = 0› by (auto simp add: divide_simps algebra_simps)
qed (simp add: ‹f 0 = 0›)
then show ?thesis
using ‹x ≥ 0› ‹y ≥ 0› by (metis add.commute le_less not_le)
qed
(* Subadditivity of x ↦ x powr p for 0 < p ≤ 1, from concavity (convex_powr'). *)
lemma x_plus_y_p_le_xp_plus_yp:
fixes p x y::real
assumes "p > 0" "p ≤ 1" "x ≥ 0" "y ≥ 0"
shows "(x + y) powr p ≤ x powr p + y powr p"
using convex_fx_plus_fy_ineq[OF convex_powr'[OF ‹p > 0› ‹p ≤ 1›] ‹x ≥ 0› ‹y ≥ 0›] by auto
subsection ‹Nonnegative-extended-real.thy›
(* Adding top absorbs in ennreal. *)
lemma x_plus_top_ennreal [simp]:
"x + ⊤ = (⊤::ennreal)"
by simp
(* An ennreal above every natural number must be infinite. *)
lemma ennreal_ge_nat_imp_PInf:
fixes x::ennreal
assumes "⋀N. x ≥ of_nat N"
shows "x = ∞"
using assms apply (cases x, auto) by (meson not_less reals_Archimedean2)
(* Archimedean property: any finite ennreal is below some natural number. *)
lemma ennreal_archimedean:
assumes "x ≠ (∞::ennreal)"
shows "∃n::nat. x ≤ n"
using assms ennreal_ge_nat_imp_PInf linear by blast
(* e2ennreal distributes over multiplication when one factor is nonnegative. *)
lemma e2ennreal_mult:
fixes a b::ereal
assumes "a ≥ 0"
shows "e2ennreal(a * b) = e2ennreal a * e2ennreal b"
by (metis assms e2ennreal_neg eq_onp_same_args ereal_mult_le_0_iff linear times_ennreal.abs_eq)
(* Symmetric variant, with the nonnegativity assumption on b. *)
lemma e2ennreal_mult':
fixes a b::ereal
assumes "b ≥ 0"
shows "e2ennreal(a * b) = e2ennreal a * e2ennreal b"
using e2ennreal_mult[OF assms, of a] by (simp add: mult.commute)
(* ennreal commutes with suprema of bounded real-valued families. *)
lemma SUP_real_ennreal:
assumes "A ≠ {}" "bdd_above (f`A)"
shows "(SUP a∈A. ennreal (f a)) = ennreal(SUP a∈A. f a)"
apply (rule antisym, simp add: SUP_least assms(2) cSUP_upper ennreal_leI)
by (metis assms(1) ennreal_SUP ennreal_less_top le_less)
(* e2ennreal commutes with Liminf along a nontrivial filter. *)
lemma e2ennreal_Liminf:
"F ≠ bot ⟹ e2ennreal (Liminf F f) = Liminf F (λn. e2ennreal (f n))"
by (rule Liminf_compose_continuous_mono[symmetric])
(auto simp: mono_def e2ennreal_mono continuous_on_e2ennreal)
(* For nonnegative ereal x, e2ennreal x is top iff x is infinite. *)
lemma e2ennreal_eq_infty[simp]: "0 ≤ x ⟹ e2ennreal x = top ⟷ x = ∞"
by (cases x) (auto)
(* Multiplying an Inf by a positive real constant, in ennreal. *)
lemma ennreal_Inf_cmult:
assumes "c>(0::real)"
shows "Inf {ennreal c * x |x. P x} = ennreal c * Inf {x. P x}"
proof -
(* Multiplication by ennreal c is a monotone bijection (inverse: division by c),
   so it commutes with Inf by mono_bij_Inf. *)
have "(λx::ennreal. c * x) (Inf {x::ennreal. P x}) = Inf ((λx::ennreal. c * x)`{x::ennreal. P x})"
apply (rule mono_bij_Inf)
apply (simp add: monoI mult_left_mono)
apply (rule bij_betw_byWitness[of _ "λx. (x::ennreal) / c"], auto simp add: assms)
apply (metis assms ennreal_lessI ennreal_neq_top mult.commute mult_divide_eq_ennreal not_less_zero)
apply (metis assms divide_ennreal_def ennreal_less_zero_iff ennreal_neq_top less_irrefl mult.assoc mult.left_commute mult_divide_eq_ennreal)
done
then show ?thesis by (simp only: setcompr_eq_image[symmetric])
qed
(* Continuity of x ↦ a - f x in ennreal; proved by transfer to ereal, with a
   case split on whether the constant a is finite. *)
lemma continuous_on_const_minus_ennreal:
fixes f :: "'a :: topological_space ⇒ ennreal"
shows "continuous_on A f ⟹ continuous_on A (λx. a - f x)"
including ennreal.lifting
proof (transfer fixing: A; clarsimp)
fix f :: "'a ⇒ ereal" and a :: "ereal" assume "0 ≤ a" "∀x. 0 ≤ f x" and f: "continuous_on A f"
then show "continuous_on A (λx. max 0 (a - f x))"
proof cases
assume "∃r. a = ereal r"
with f show ?thesis
by (auto simp: continuous_on_def minus_ereal_def ereal_Lim_uminus[symmetric]
intro!: tendsto_add_ereal_general tendsto_max)
next
(* If a is not a finite real, then a = ∞ and the function is constant. *)
assume "∄r. a = ereal r"
with ‹0 ≤ a› have "a = ∞"
by (cases a) auto
then show ?thesis
by (simp add: continuous_on_const)
qed
qed
(* Duality between Liminf and Limsup under subtraction from a constant. *)
lemma const_minus_Liminf_ennreal:
fixes a :: ennreal
shows "F ≠ bot ⟹ a - Liminf F f = Limsup F (λx. a - f x)"
by (intro Limsup_compose_continuous_antimono[symmetric])
(auto simp: antimono_def ennreal_mono_minus continuous_on_id continuous_on_const_minus_ennreal)
(* Scaling a limit by an ennreal constant, excluding the indeterminate ∞ * 0. *)
lemma tendsto_cmult_ennreal [tendsto_intros]:
fixes c l::ennreal
assumes "¬(c = ∞ ∧ l = 0)"
"(f ⤏ l) F"
shows "((λx. c * f x) ⤏ c * l) F"
by (cases "c = 0", insert assms, auto intro!: tendsto_intros)
subsection ‹Indicator-Function.thy›
text ‹There is something weird with \verb+sum_mult_indicator+: it is defined both
in Indicator.thy and BochnerIntegration.thy, with a different meaning. I am surprised
there is no name collision... Here, I am using the version from BochnerIntegration.›
(* Summing indicators counts the sets containing x. *)
lemma sum_indicator_eq_card2:
assumes "finite I"
shows "(∑i∈I. (indicator (P i) x)::nat) = card {i∈I. x ∈ P i}"
using sum_mult_indicator [OF assms, of "λy. 1::nat" P "λy. x"]
unfolding card_eq_sum by auto
(* For a disjoint family, the sum of indicators at a point is at most 1. *)
lemma disjoint_family_indicator_le_1:
assumes "disjoint_family_on A I"
shows "(∑ i∈ I. indicator (A i) x) ≤ (1::'a:: {comm_monoid_add,zero_less_one})"
proof (cases "finite I")
case True
then have *: "(∑ i∈ I. indicator (A i) x) = ((indicator (⋃i∈I. A i) x)::'a)"
by (simp add: indicator_UN_disjoint[OF True assms(1), of x])
show ?thesis
unfolding * unfolding indicator_def by (simp add: order_less_imp_le)
next
(* Over an infinite index set the sum is 0 by convention. *)
case False
then show ?thesis by (simp add: order_less_imp_le)
qed
subsection ‹sigma-algebra.thy›
(* The intersection of two algebras on the same space is an algebra. *)
lemma algebra_intersection:
assumes "algebra Ω A"
"algebra Ω B"
shows "algebra Ω (A ∩ B)"
apply (subst algebra_iff_Un) using assms by (auto simp add: algebra_iff_Un)
(* The intersection of two sigma-algebras on the same space is a sigma-algebra. *)
lemma sigma_algebra_intersection:
assumes "sigma_algebra Ω A"
"sigma_algebra Ω B"
shows "sigma_algebra Ω (A ∩ B)"
apply (subst sigma_algebra_iff) using assms by (auto simp add: sigma_algebra_iff algebra_intersection)
(* Every measure space is a subalgebra of itself. *)
lemma subalgebra_M_M [simp]:
"subalgebra M M"
by (simp add: subalgebra_def)
text ‹The next one is \verb+disjoint_family_Suc+ with inclusions reversed.›
(* For a decreasing family, successive differences A i - A (Suc i) are disjoint. *)
lemma disjoint_family_Suc2:
assumes Suc: "⋀n. A (Suc n) ⊆ A n"
shows "disjoint_family (λi. A i - A (Suc i))"
proof -
have "A (m+n) ⊆ A n" for m n
proof (induct m)
case 0 show ?case by simp
next
case (Suc m) then show ?case
by (metis Suc_eq_plus1 assms add.commute add.left_commute subset_trans)
qed
then have "A m ⊆ A n" if "m > n" for m n
by (metis that add.commute le_add_diff_inverse nat_less_le)
then show ?thesis
by (auto simp add: disjoint_family_on_def)
(metis insert_absorb insert_subset le_SucE le_antisym not_le_imp_less)
qed
subsection ‹Measure-Space.thy›
(* If each f i equals g i almost everywhere, then so do the finite sums over I. *)
lemma AE_equal_sum:
assumes "⋀i. AE x in M. f i x = g i x"
shows "AE x in M. (∑i∈I. f i x) = (∑i∈I. g i x)"
proof (cases)
assume "finite I"
(* Collect, for each i, a null set outside of which f i and g i agree. *)
have "∃A. A ∈ null_sets M ∧ (∀x∈ (space M - A). f i x = g i x)" for i
using assms(1)[of i] by (metis (mono_tags, lifting) AE_E3)
then obtain A where A: "⋀i. A i ∈ null_sets M ∧ (∀x∈ (space M -A i). f i x = g i x)"
by metis
(* B is the (finite) union of the exceptional sets, still null. *)
define B where "B = (⋃i∈I. A i)"
have "B ∈ null_sets M" using ‹finite I› A B_def by blast
then have "AE x in M. x ∈ space M - B" by (simp add: AE_not_in)
moreover
{
fix x assume "x ∈ space M - B"
then have "⋀i. i ∈ I ⟹ f i x = g i x" unfolding B_def using A by auto
then have "(∑i∈I. f i x) = (∑i∈I. g i x)" by auto
}
ultimately show ?thesis by auto
qed (simp)
(* If a countable union has positive measure, some member already does. *)
lemma emeasure_pos_unionE:
assumes "⋀ (N::nat). A N ∈ sets M"
"emeasure M (⋃N. A N) > 0"
shows "∃N. emeasure M (A N) > 0"
proof (rule ccontr)
assume "¬(∃N. emeasure M (A N) > 0)"
then have "⋀N. A N ∈ null_sets M"
using assms(1) by auto
then have "(⋃N. A N) ∈ null_sets M" by auto
then show False using assms(2) by auto
qed
(* In a probability space, if each U n has measure at least 1 - e n with
   summable e, then the intersection has measure at least 1 - sum e.
   Proved by bounding the union of the complements. *)
lemma (in prob_space) emeasure_intersection:
fixes e::"nat ⇒ real"
assumes [measurable]: "⋀n. U n ∈ sets M"
and [simp]: "⋀n. 0 ≤ e n" "summable e"
and ge: "⋀n. emeasure M (U n) ≥ 1 - (e n)"
shows "emeasure M (⋂n. U n) ≥ 1 - (∑n. e n)"
proof -
(* V n is the complement of U n; each has measure at most e n. *)
define V where "V = (λn. space M - (U n))"
have [measurable]: "V n ∈ sets M" for n
unfolding V_def by auto
have *: "emeasure M (V n) ≤ e n" for n
unfolding V_def using ge[of n] by (simp add: emeasure_eq_measure prob_compl ennreal_leI)
(* Countable subadditivity bounds the union of the V n by the series sum. *)
have "emeasure M (⋃n. V n) ≤ (∑n. emeasure M (V n))"
by (rule emeasure_subadditive_countably, auto)
also have "... ≤ (∑n. ennreal (e n))"
using * by (intro suminf_le) auto
also have "... = ennreal (∑n. e n)"
by (intro suminf_ennreal_eq) auto
finally have "emeasure M (⋃n. V n) ≤ suminf e" by simp
then have "1 - suminf e ≤ emeasure M (space M - (⋃n. V n))"
by (simp add: emeasure_eq_measure prob_compl suminf_nonneg)
also have "... ≤ emeasure M (⋂n. U n)"
by (rule emeasure_mono) (auto simp: V_def)
finally show ?thesis by simp
qed
(* Being equal up to a null symmetric difference is transitive. *)
lemma null_sym_diff_transitive:
assumes "A Δ B ∈ null_sets M" "B Δ C ∈ null_sets M"
and [measurable]: "A ∈ sets M" "C ∈ sets M"
shows "A Δ C ∈ null_sets M"
proof -
have "A Δ B ∪ B Δ C ∈ null_sets M" using assms(1) assms(2) by auto
moreover have "A Δ C ⊆ A Δ B ∪ B Δ C" by auto
ultimately show ?thesis by (meson null_sets_subset assms(3) assms(4) sets.Diff sets.Un)
qed
(* A set differing from a null set by a null symmetric difference is null. *)
lemma Delta_null_of_null_is_null:
assumes "B ∈ sets M" "A Δ B ∈ null_sets M" "A ∈ null_sets M"
shows "B ∈ null_sets M"
proof -
have "B ⊆ A ∪ (A Δ B)" by auto
then show ?thesis using assms by (meson null_sets.Un null_sets_subset)
qed
(* Sets with null symmetric difference have the same measure. *)
lemma Delta_null_same_emeasure:
assumes "A Δ B ∈ null_sets M" and [measurable]: "A ∈ sets M" "B ∈ sets M"
shows "emeasure M A = emeasure M B"
proof -
(* Both measures coincide with that of the intersection A ∩ B. *)
have "A = (A ∩ B) ∪ (A-B)" by blast
moreover have "A-B ∈ null_sets M" using assms null_sets_subset by blast
ultimately have a: "emeasure M A = emeasure M (A ∩ B)" using emeasure_Un_null_set by (metis assms(2) assms(3) sets.Int)
have "B = (A ∩ B) ∪ (B-A)" by blast
moreover have "B-A ∈ null_sets M" using assms null_sets_subset by blast
ultimately have "emeasure M B = emeasure M (A ∩ B)" using emeasure_Un_null_set by (metis assms(2) assms(3) sets.Int)
then show ?thesis using a by auto
qed
(* If F x ≤ G x + e almost everywhere for every positive real e, then F ≤ G
   almost everywhere (for ereal-valued functions). *)
lemma AE_upper_bound_inf_ereal:
fixes F G::"'a ⇒ ereal"
assumes "⋀e. (e::real) > 0 ⟹ AE x in M. F x ≤ G x + e"
shows "AE x in M. F x ≤ G x"
proof -
(* Specialize e to the countable family 1/(Suc n) and combine via AE_all_countable. *)
have "AE x in M. ∀n::nat. F x ≤ G x + ereal (1 / Suc n)"
using assms by (auto simp: AE_all_countable)
then show ?thesis
proof (eventually_elim)
fix x assume x: "∀n::nat. F x ≤ G x + ereal (1 / Suc n)"
show "F x ≤ G x"
proof (intro ereal_le_epsilon2[of _ "G x"] allI impI)
fix e :: real assume "0 < e"
then obtain n where n: "1 / Suc n < e"
by (blast elim: nat_approx_posE)
have "F x ≤ G x + 1 / Suc n"
using x by simp
also have "… ≤ G x + e"
using n by (intro add_mono ennreal_leI) auto
finally show "F x ≤ G x + ereal e" .
qed
qed
qed
text ‹Egorov theorem asserts that, if a sequence of functions converges almost everywhere to a
limit, then the convergence is uniform on a subset of close to full measure. The first step in the
proof is the following lemma, often useful by itself, asserting the same result for predicates:
if a property $P_n x$ is eventually true for almost every $x$, then there exists $N$
such that $P_n x$ is true for all $n\geq N$ and all $x$ in a set of close to full measure.
›
lemma (in finite_measure) Egorov_lemma:
assumes [measurable]: "⋀n. (P n) ∈ measurable M (count_space UNIV)"
and "AE x in M. eventually (λn. P n x) sequentially"
"epsilon > 0"
shows "∃U N. U ∈ sets M ∧ (∀n ≥ N. ∀x ∈ U. P n x) ∧ emeasure M (space M - U) < epsilon"
proof -
(* K n is the bad set where P k fails for some k ≥ n; the K n decrease. *)
define K where "K = (λn. {x ∈ space M. ∃k≥n. ¬(P k x)})"
have [measurable]: "K n ∈ sets M" for n
unfolding K_def by auto
have "x ∉ (⋂n. K n)" if "eventually (λn. P n x) sequentially" for x
unfolding K_def using that unfolding K_def eventually_sequentially by auto
then have "AE x in M. x ∉ (⋂n. K n)" using assms by auto
then have Z: "0 = emeasure M (⋂n. K n)"
using AE_iff_measurable[of "(⋂n. K n)" M "λx. x ∉ (⋂n. K n)"] unfolding K_def by auto
(* By continuity from above (finite measure), emeasure M (K n) tends to 0. *)
have *: "(λn. emeasure M (K n)) ⇢ 0"
unfolding Z apply (rule Lim_emeasure_decseq) using order_trans by (auto simp add: K_def decseq_def)
have "eventually (λn. emeasure M (K n) < epsilon) sequentially"
by (rule order_tendstoD(2)[OF * ‹epsilon > 0›])
then obtain N where N: "⋀n. n ≥ N ⟹ emeasure M (K n) < epsilon"
unfolding eventually_sequentially by auto
(* Take U to be the complement of K N. *)
define U where "U = space M - K N"
have A [measurable]: "U ∈ sets M" unfolding U_def by auto
have "space M - U = K N"
unfolding U_def K_def by auto
then have B: "emeasure M (space M - U) < epsilon"
using N by auto
have "∀n ≥ N. ∀x ∈ U. P n x"
unfolding U_def K_def by auto
then show ?thesis using A B by blast
qed
text ‹The next lemma asserts that, in an uncountable family of disjoint sets, then there is one
set with zero measure (and in fact uncountably many). It is often applied to the boundaries of
$r$-neighborhoods of a given set, to show that one could choose $r$ for which this boundary has
zero measure (this shows up often in relation with weak convergence).›
lemma (in finite_measure) uncountable_disjoint_family_then_exists_zero_measure:
assumes [measurable]: "⋀i. i ∈ I ⟹ A i ∈ sets M"
and "uncountable I"
"disjoint_family_on A I"
shows "∃i∈I. measure M (A i) = 0"
proof -
(* f r collects the indices whose set has measure exceeding r. *)
define f where "f = (λ(r::real). {i ∈ I. measure M (A i) > r})"
(* For r > 0, f r is finite: disjointness bounds card (f r) * r by the total mass. *)
have *: "finite (f r)" if "r > 0" for r
proof -
obtain N::nat where N: "measure M (space M)/r ≤ N"
using real_arch_simple by blast
have "finite (f r) ∧ card (f r) ≤ N"
proof (rule finite_if_finite_subsets_card_bdd)
fix G assume G: "G ⊆ f r" "finite G"
then have "G ⊆ I" unfolding f_def by auto
have "card G * r = (∑i ∈ G. r)" by auto
also have "... ≤ (∑i ∈ G. measure M (A i))"
apply (rule sum_mono) using G unfolding f_def by auto
also have "... = measure M (⋃i∈G. A i)"
apply (rule finite_measure_finite_Union[symmetric])
using ‹finite G› ‹G ⊆ I› ‹disjoint_family_on A I› disjoint_family_on_mono by auto
also have "... ≤ measure M (space M)"
by (simp add: bounded_measure)
finally have "card G ≤ measure M (space M)/r"
using ‹r > 0› by (simp add: divide_simps)
then show "card G ≤ N" using N by auto
qed
then show ?thesis by simp
qed
(* Hence only countably many indices have positive measure; pick one outside. *)
have "countable (⋃n. f (((1::real)/2)^n))"
by (rule countable_UN, auto intro!: countable_finite *)
then have "I - (⋃n. f (((1::real)/2)^n)) ≠ {}"
using assms(2) by (metis countable_empty uncountable_minus_countable)
then obtain i where "i ∈ I" "i ∉ (⋃n. f ((1/2)^n))" by auto
then have "measure M (A i) ≤ (1 / 2) ^ n" for n
unfolding f_def using linorder_not_le by auto
moreover have "(λn. ((1::real) / 2) ^ n) ⇢ 0"
by (intro tendsto_intros, auto)
ultimately have "measure M (A i) ≤ 0"
using LIMSEQ_le_const by force
then have "measure M (A i) = 0"
by (simp add: measure_le_0_iff)
then show ?thesis using ‹i ∈ I› by auto
qed
text ‹The next statements are useful measurability statements.›
(* The pointwise Inf of the set of indices n with P n x is measurable. *)
lemma measurable_Inf [measurable]:
assumes [measurable]: "⋀(n::nat). P n ∈ measurable M (count_space UNIV)"
shows "(λx. Inf {n. P n x}) ∈ measurable M (count_space UNIV)" (is "?f ∈ _")
proof -
(* A n is the set where n is the first index satisfying P. *)
define A where "A = (λn. (P n)-`{True} ∩ space M - (⋃m<n. (P m)-`{True} ∩ space M))"
have A_meas [measurable]: "A n ∈ sets M" for n unfolding A_def by measurable
(* B partitions the space: B 0 is where no P n ever holds, B (n+1) is A n. *)
define B where "B = (λn. if n = 0 then (space M - (⋃n. A n)) else A (n-1))"
show ?thesis
proof (rule measurable_piecewise_restrict2[of B])
show "B n ∈ sets M" for n unfolding B_def by simp
show "space M = (⋃n. B n)"
unfolding B_def using sets.sets_into_space [OF A_meas] by auto
(* On each piece B n, ?f is constant, hence trivially measurable there. *)
have *: "?f x = n" if "x ∈ A n" for x n
apply (rule cInf_eq_minimum) using that unfolding A_def by auto
moreover have **: "?f x = (Inf ({}::nat set))" if "x ∈ space M - (⋃n. A n)" for x
proof -
have "¬(P n x)" for n
apply (induction n rule: nat_less_induct) using that unfolding A_def by auto
then show ?thesis by simp
qed
ultimately have "∃c. ∀x ∈ B n. ?f x = c" for n
apply (cases "n = 0") unfolding B_def by auto
then show "∃h ∈ measurable M (count_space UNIV). ∀x ∈ B n. ?f x = h x" for n
by fastforce
qed
qed
(* Iterating a measurable map a measurable number of times stays measurable. *)
lemma measurable_T_iter [measurable]:
fixes f::"'a ⇒ nat"
assumes [measurable]: "T ∈ measurable M M"
"f ∈ measurable M (count_space UNIV)"
shows "(λx. (T^^(f x)) x) ∈ measurable M M"
proof -
have [measurable]: "(T^^n) ∈ measurable M M" for n::nat
by (induction n, auto)
show ?thesis
by (rule measurable_compose_countable, auto)
qed
(* The distance to a fixed set is Borel measurable (it is continuous). *)
lemma measurable_infdist [measurable]:
"(λx. infdist x S) ∈ borel_measurable borel"
by (rule borel_measurable_continuous_onI, intro continuous_intros)
text ‹The next lemma shows that, in a sigma finite measure space, sets with large measure
can be approximated by sets with large but finite measure.›
lemma (in sigma_finite_measure) approx_with_finite_emeasure:
assumes W_meas: "W ∈ sets M"
and W_inf: "emeasure M W > C"
obtains Z where "Z ∈ sets M" "Z ⊆ W" "emeasure M Z < ∞" "emeasure M Z > C"
proof (cases "emeasure M W = ∞")
(* Infinite case: use approx_PInf_emeasure_with_finite on a real bound for C. *)
case True
obtain r where r: "C = ennreal r" using W_inf by (cases C, auto)
obtain Z where "Z ∈ sets M" "Z ⊆ W" "emeasure M Z < ∞" "emeasure M Z > C"
unfolding r using approx_PInf_emeasure_with_finite[OF W_meas True, of r] by auto
then show ?thesis using that by blast
next
(* Finite case: W itself works. *)
case False
then have "W ∈ sets M" "W ⊆ W" "emeasure M W < ∞" "emeasure M W > C"
using assms apply auto using top.not_eq_extremum by blast
then show ?thesis using that by blast
qed
subsection ‹Nonnegative-Lebesgue-Integration.thy›
text ‹The next lemma is a variant of \verb+nn_integral_density+,
with the density on the right instead of the left, as seems more common.›
(* Variant of nn_integral_density with the density factor g written on the
   right of the integrand: ∫ f·g dF = ∫ f d(density F g). *)
lemma nn_integral_densityR:
assumes [measurable]: "f ∈ borel_measurable F" "g ∈ borel_measurable F"
shows "(∫⇧+ x. f x * g x ∂F) = (∫⇧+ x. f x ∂(density F g))"
proof -
(* commute the product to match the statement of nn_integral_density *)
have "(∫⇧+ x. f x * g x ∂F) = (∫⇧+ x. g x * f x ∂F)" by (simp add: mult.commute)
also have "... = (∫⇧+ x. f x ∂(density F g))"
by (rule nn_integral_density[symmetric], simp_all add: assms)
finally show ?thesis by simp
qed
(* If a nonnegative (ennreal-valued) function has strictly positive integral,
   then it is bounded below by some e > 0 on a set of positive measure.
   Reduces to not_AE_zero_ennreal_E: it suffices to rule out f = 0 a.e. *)
lemma not_AE_zero_int_ennreal_E:
fixes f::"'a ⇒ ennreal"
assumes "(∫⇧+x. f x ∂M) > 0"
and [measurable]: "f ∈ borel_measurable M"
shows "∃A∈sets M. ∃e::real>0. emeasure M A > 0 ∧ (∀x ∈ A. f x ≥ e)"
proof (rule not_AE_zero_ennreal_E, auto simp add: assms)
(* f = 0 a.e. would force the integral to vanish, contradicting positivity *)
assume *: "AE x in M. f x = 0"
have "(∫⇧+x. f x ∂M) = (∫⇧+x. 0 ∂M)" by (rule nn_integral_cong_AE, simp add: *)
then have "(∫⇧+x. f x ∂M) = 0" by simp
then show False using assms by simp
qed
(* Equality case of the trivial bound: if f ≤ c a.e. and the nonnegative
   integral attains its maximal possible value c · μ(space M), then f = c a.e.
   Proof: the difference g = c - f is nonnegative with zero integral. *)
lemma (in finite_measure) nn_integral_bounded_eq_bound_then_AE:
assumes "AE x in M. f x ≤ ennreal c" "(∫⇧+x. f x ∂M) = c * emeasure M (space M)"
and [measurable]: "f ∈ borel_measurable M"
shows "AE x in M. f x = c"
proof (cases)
(* degenerate case: the whole space is null, so anything holds a.e. *)
assume "emeasure M (space M) = 0"
then show ?thesis by (rule emeasure_0_AE)
next
assume "emeasure M (space M) ≠ 0"
have fin: "AE x in M. f x ≠ top" using assms by (auto simp: top_unique)
define g where "g = (λx. c - f x)"
have [measurable]: "g ∈ borel_measurable M" unfolding g_def by auto
(* ∫ g = ∫ c - ∫ f = 0, using the exact-equality hypothesis *)
have "(∫⇧+x. g x ∂M) = (∫⇧+x. c ∂M) - (∫⇧+x. f x ∂M)"
unfolding g_def by (rule nn_integral_diff, auto simp add: assms ennreal_mult_eq_top_iff)
also have "… = 0" using assms(2) by (auto simp: ennreal_mult_eq_top_iff)
finally have "AE x in M. g x = 0"
by (subst nn_integral_0_iff_AE[symmetric]) auto
(* g = 0 a.e. and f finite a.e. give c ≤ f a.e.; combined with f ≤ c we are done *)
then have "AE x in M. c ≤ f x" unfolding g_def using fin by (auto simp: ennreal_minus_eq_0)
then show ?thesis using assms(1) by auto
qed
(* A density that is a.e. nonzero does not change the null sets:
   null_sets (density M h) = null_sets M. *)
lemma null_sets_density:
assumes [measurable]: "h ∈ borel_measurable M"
and "AE x in M. h x ≠ 0"
shows "null_sets (density M h) = null_sets M"
proof -
(* key equivalence: since h ≠ 0 a.e., "h = 0 a.e. on A" is the same as "A null" *)
have *: "A ∈ sets M ∧ (AE x∈A in M. h x = 0) ⟷ A ∈ null_sets M" for A
proof (auto)
assume "A ∈ sets M" "AE x∈A in M. h x = 0"
then show "A ∈ null_sets M"
unfolding AE_iff_null_sets[OF ‹A ∈ sets M›] using assms(2) by auto
next
assume "A ∈ null_sets M"
then show "AE x∈A in M. h x = 0"
by (metis (mono_tags, lifting) AE_not_in eventually_mono)
qed
show ?thesis
apply (rule set_eqI)
unfolding null_sets_density_iff[OF ‹h ∈ borel_measurable M›] using * by auto
qed
text ‹The next proposition asserts that, if a function $h$ is integrable, then its integral on
any set with small enough measure is small. The good conceptual proof is by considering the
distribution of the function $h$ on $\mathbb{R}$ and looking at its tails. However, there is a
less conceptual but more direct proof, based on dominated convergence and a proof by contradiction.
This is the proof we give below.›
(* An integrable function has small integral on sets of small enough measure.
   Proof by contradiction: if not, pick for each epsilon a "bad" set of measure
   < epsilon whose integral is at least delta in absolute value; specializing
   to epsilon = (1/2)^n, Borel-Cantelli shows a.e. point eventually avoids the
   bad sets, so dominated convergence forces the integrals to tend to 0,
   contradicting that each is ≥ delta. *)
proposition integrable_small_integral_on_small_sets:
fixes h::"'a ⇒ real"
assumes [measurable]: "integrable M h"
and "delta > 0"
shows "∃epsilon>(0::real). ∀U ∈ sets M. emeasure M U < epsilon ⟶ abs (∫x∈U. h x ∂M) < delta"
proof (rule ccontr)
assume H: "¬ (∃epsilon>0. ∀U∈sets M. emeasure M U < ennreal epsilon ⟶ abs(set_lebesgue_integral M U h) < delta)"
(* choice of a bad set f epsilon for every epsilon > 0 *)
have "∃f. ∀epsilon∈{0<..}. f epsilon ∈sets M ∧ emeasure M (f epsilon) < ennreal epsilon
∧ ¬(abs(set_lebesgue_integral M (f epsilon) h) < delta)"
apply (rule bchoice) using H by auto
then obtain f::"real ⇒ 'a set" where f:
"⋀epsilon. epsilon > 0 ⟹ f epsilon ∈sets M"
"⋀epsilon. epsilon > 0 ⟹ emeasure M (f epsilon) < ennreal epsilon"
"⋀epsilon. epsilon > 0 ⟹ ¬(abs(set_lebesgue_integral M (f epsilon) h) < delta)"
by blast
(* the subsequence of bad sets with summable measures *)
define A where "A = (λn::nat. f ((1/2)^n))"
have [measurable]: "A n ∈ sets M" for n
unfolding A_def using f(1) by auto
have *: "emeasure M (A n) < ennreal ((1/2)^n)" for n
unfolding A_def using f(2) by auto
have Large: "¬(abs(set_lebesgue_integral M (A n) h) < delta)" for n
unfolding A_def using f(3) by auto
(* the measures of the A n are dominated by a geometric series *)
have S: "summable (λn. Sigma_Algebra.measure M (A n))"
apply (rule summable_comparison_test'[of "λn. (1/2)^n" 0])
apply (rule summable_geometric, auto)
apply (subst ennreal_le_iff[symmetric], simp)
using less_imp_le[OF *] by (metis * emeasure_eq_ennreal_measure top.extremum_strict)
(* Borel-Cantelli: a.e. x belongs to only finitely many A n *)
have "AE x in M. eventually (λn. x ∈ space M - A n) sequentially"
apply (rule borel_cantelli_AE1, auto simp add: S)
by (metis * top.extremum_strict top.not_eq_extremum)
moreover have "(λn. indicator (A n) x * h x) ⇢ 0"
if "eventually (λn. x ∈ space M - A n) sequentially" for x
proof -
have "eventually (λn. indicator (A n) x * h x = 0) sequentially"
apply (rule eventually_mono[OF that]) unfolding indicator_def by auto
then show ?thesis
unfolding eventually_sequentially using lim_explicit by force
qed
ultimately have A: "AE x in M. ((λn. indicator (A n) x * h x) ⇢ 0)"
by auto
have I: "integrable M (λx. abs(h x))"
using ‹integrable M h› by auto
(* dominated convergence (dominating function |h|) gives the contradiction *)
have L: "(λn. abs (∫x. indicator (A n) x * h x ∂M)) ⇢ abs (∫x. 0 ∂M)"
apply (intro tendsto_intros)
apply (rule integral_dominated_convergence[OF _ _ I A])
unfolding indicator_def by auto
have "eventually (λn. abs (∫x. indicator (A n) x * h x ∂M) < delta) sequentially"
apply (rule order_tendstoD[OF L]) using ‹delta > 0› by auto
then show False
using Large by (auto simp: set_lebesgue_integral_def)
qed
text ‹We also give the version for nonnegative ennreal valued functions. It follows from the
previous one.›
(* ennreal version of the previous proposition: a nonnegative function with
   finite nn-integral has small integral on sets of small enough measure.
   Reduced to the real-valued case through f = enn2real ∘ h, which agrees with
   h a.e. since h is a.e. finite. *)
proposition small_nn_integral_on_small_sets:
fixes h::"'a ⇒ ennreal"
assumes [measurable]: "h ∈ borel_measurable M"
and "delta > (0::real)" "(∫⇧+x. h x ∂M) ≠ ∞"
shows "∃epsilon>(0::real). ∀U ∈ sets M. emeasure M U < epsilon ⟶ (∫⇧+x∈U. h x ∂M) < delta"
proof -
define f where "f = (λx. enn2real(h x))"
(* finiteness of the integral makes h finite a.e., so ennreal (f x) = h x a.e. *)
have "AE x in M. h x ≠ ∞"
using assms by (metis nn_integral_PInf_AE)
then have *: "AE x in M. ennreal (f x) = h x"
unfolding f_def using ennreal_enn2real_if by auto
have **: "(∫⇧+x. ennreal (f x) ∂M) ≠ ∞"
using nn_integral_cong_AE[OF *] assms by auto
have [measurable]: "f ∈ borel_measurable M" unfolding f_def by auto
have "integrable M f"
apply (rule integrableI_nonneg) using assms * f_def ** apply auto
using top.not_eq_extremum by blast
(* apply the real-valued statement to f *)
obtain epsilon::real where H: "epsilon > 0" "⋀U. U ∈ sets M ⟹ emeasure M U < epsilon ⟹ abs(∫x∈U. f x ∂M) < delta"
using integrable_small_integral_on_small_sets[OF ‹integrable M f› ‹delta > 0›] by blast
(* transfer the bound back to the nn-integral of h *)
have "(∫⇧+x∈U. h x ∂M) < delta" if [measurable]: "U ∈ sets M" "emeasure M U < epsilon" for U
proof -
have "(∫⇧+x. indicator U x * h x ∂M) = (∫⇧+x. ennreal(indicator U x * f x) ∂M)"
apply (rule nn_integral_cong_AE) using * unfolding indicator_def by auto
also have "... = ennreal (∫x. indicator U x * f x ∂M)"
apply (rule nn_integral_eq_integral)
apply (rule Bochner_Integration.integrable_bound[OF ‹integrable M f›])
unfolding indicator_def f_def by auto
also have "... < ennreal delta"
apply (rule ennreal_lessI) using H(2)[OF that] by (auto simp: set_lebesgue_integral_def)
finally show ?thesis by (auto simp add: mult.commute)
qed
then show ?thesis using ‹epsilon > 0› by auto
qed
subsection ‹Probability-measure.thy›
text ‹The next lemmas ensure that, if sets have a probability close to $1$, then their
intersection also does.›
(* In a probability space: P(A) + P(B) ≤ 1 + P(A ∩ B), via inclusion-exclusion
   and P(A ∪ B) ≤ 1. *)
lemma (in prob_space) sum_measure_le_measure_inter:
assumes "A ∈ sets M" "B ∈ sets M"
shows "prob A + prob B ≤ 1 + prob (A ∩ B)"
proof -
have "prob A + prob B = prob (A ∪ B) + prob (A ∩ B)"
by (simp add: assms fmeasurable_eq_sets measure_Un3)
also have "... ≤ 1 + prob (A ∩ B)"
by auto
finally show ?thesis by simp
qed
(* Three-set version: P(A) + P(B) + P(C) ≤ 2 + P(A ∩ B ∩ C), by applying the
   two-set inequality twice. *)
lemma (in prob_space) sum_measure_le_measure_inter3:
assumes [measurable]: "A ∈ sets M" "B ∈ sets M" "C ∈ sets M"
shows "prob A + prob B + prob C ≤ 2 + prob (A ∩ B ∩ C)"
using sum_measure_le_measure_inter[of B C] sum_measure_le_measure_inter[of A "B ∩ C"]
by (auto simp add: inf_assoc)
(* General version over a finite nonempty index set:
   Σ P(A i) ≤ card I - 1 + P(⋂ A i).  Induction on the finite set, using the
   two-set inequality at each step. *)
lemma (in prob_space) sum_measure_le_measure_Inter:
assumes [measurable]: "finite I" "I ≠ {}" "⋀i. i ∈ I ⟹ A i ∈ sets M"
shows "(∑i∈I. prob (A i)) ≤ real(card I) - 1 + prob (⋂i∈I. A i)"
using assms proof (induct I rule: finite_ne_induct)
fix x F assume H: "finite F" "F ≠ {}" "x ∉ F"
"((⋀i. i ∈ F ⟹ A i ∈ events) ⟹ (∑i∈F. prob (A i)) ≤ real (card F) - 1 + prob (⋂(A ` F)))"
and [measurable]: "(⋀i. i ∈ insert x F ⟹ A i ∈ events)"
have "(⋂x∈F. A x) ∈ events" using ‹finite F› ‹F ≠ {}› by auto
(* split off the new index x, apply the induction hypothesis, then merge
   ⋂(A ` F) with A x using the two-set inequality *)
have "(∑i∈insert x F. prob (A i)) = (∑i∈F. prob (A i)) + prob (A x)"
using H(1) H(3) by auto
also have "... ≤ real (card F)-1 + prob (⋂(A ` F)) + prob (A x)"
using H(4) by auto
also have "... ≤ real (card F) + prob ((⋂(A ` F)) ∩ A x)"
using sum_measure_le_measure_inter[OF ‹(⋂x∈F. A x) ∈ events›, of "A x"] by auto
also have "... = real (card (insert x F)) - 1 + prob (⋂(A ` (insert x F)))"
using H(1) H(2) unfolding card_insert_disjoint[OF ‹finite F› ‹x ∉ F›] by (simp add: inf_commute)
finally show "(∑i∈insert x F. prob (A i)) ≤ real (card (insert x F)) - 1 + prob (⋂(A ` (insert x F)))"
by simp
qed (auto)
text ‹A random variable gives a small mass to small neighborhoods of
infinity.›
(* Tail bound for a random variable: for any alpha > 0 there is a threshold C
   (which can moreover be taken ≥ K, for the free parameter K) such that
   P(|f| ≥ C) < alpha.  Follows from continuity from above of the measure along
   the decreasing sets {|f| ≥ n}, whose intersection is empty. *)
lemma (in prob_space) random_variable_small_tails:
assumes "alpha > 0" and [measurable]: "f ∈ borel_measurable M"
shows "∃(C::real). prob {x ∈ space M. abs(f x) ≥ C} < alpha ∧ C ≥ K"
proof -
(* no point has |f x| ≥ n for every n, by the Archimedean property *)
have *: "(⋂(n::nat). {x∈space M. abs(f x) ≥ n}) = {}"
apply auto
by (metis real_arch_simple add.right_neutral add_mono_thms_linordered_field(4) not_less zero_less_one)
have **: "(λn. prob {x ∈ space M. abs(f x) ≥ n}) ⇢ prob (⋂(n::nat). {x ∈ space M. abs(f x) ≥ n})"
by (rule finite_Lim_measure_decseq, auto simp add: decseq_def)
have "eventually (λn. prob {x ∈ space M. abs(f x) ≥ n} < alpha) sequentially"
apply (rule order_tendstoD[OF _ ‹alpha > 0›]) using ** unfolding * by auto
then obtain N::nat where N: "⋀n::nat. n ≥ N ⟹ prob {x ∈ space M. abs(f x) ≥ n} < alpha"
unfolding eventually_sequentially by blast
(* pick a natural number above both N and K *)
have "∃n::nat. n ≥ N ∧ n ≥ K"
by (meson le_cases of_nat_le_iff order.trans real_arch_simple)
then obtain n::nat where n: "n ≥ N" "n ≥ K" by blast
show ?thesis
apply (rule exI[of _ "of_nat n"]) using N n by auto
qed
subsection ‹Distribution-functions.thy›
text ‹There is a locale called \verb+finite_borel_measure+ in \verb+distribution-functions.thy+.
However, it only deals with real measures, and real weak convergence. I will not need the
weak convergence in more general settings, but still it seems more natural to me to do the
proofs in the natural settings. Let me introduce the locale \verb+finite_borel_measure'+ for
this, although it would be better to rename the locale in the library file.›
(* Locale for a finite Borel measure on an arbitrary metric space (the library
   locale finite_borel_measure is restricted to real measures). *)
locale finite_borel_measure' = finite_measure M for M :: "('a::metric_space) measure" +
assumes M_is_borel [simp, measurable_cong]: "sets M = sets borel"
begin
lemma space_eq_univ [simp]: "space M = UNIV"
using M_is_borel[THEN sets_eq_imp_space_eq] by simp
lemma measurable_finite_borel [simp]:
"f ∈ borel_measurable borel ⟹ f ∈ borel_measurable M"
by (rule borel_measurable_subalgebra[where N = borel]) auto
text ‹Any closed set can be slightly enlarged to obtain a set whose boundary has $0$ measure.›
(* The enlargement is the r-neighborhood {x. infdist x S ≤ r}: its measure tends
   to that of S as r → 0 (S closed), and since the level sets {infdist x S = r}
   are pairwise disjoint, all but countably many of them are null, so a good
   radius r can be found in any interval (0, (1/2)^n). *)
lemma approx_closed_set_with_set_zero_measure_boundary:
assumes "closed S" "epsilon > 0" "S ≠ {}"
shows "∃r. r < epsilon ∧ r > 0 ∧ measure M {x. infdist x S = r} = 0 ∧ measure M {x. infdist x S ≤ r} < measure M S + epsilon"
proof -
have [measurable]: "S ∈ sets M"
using ‹closed S› by auto
define T where "T = (λr. {x. infdist x S ≤ r})"
have [measurable]: "T r ∈ sets borel" for r
unfolding T_def by measurable
(* the neighborhoods T ((1/2)^n) decrease to S, since S is closed *)
have *: "(⋂n. T ((1/2)^n)) = S"
unfolding T_def proof (auto)
fix x assume *: "∀n. infdist x S ≤ (1 / 2) ^n"
have "infdist x S ≤ 0"
apply (rule LIMSEQ_le_const[of "λn. (1/2)^n"], intro tendsto_intros) using * by auto
then show "x ∈ S"
using assms infdist_pos_not_in_closed by fastforce
qed
have A: "((1::real)/2)^n ≤ (1/2)^m" if "m ≤ n" for m n::nat
using that by (simp add: power_decreasing)
(* continuity from above: measure M (T ((1/2)^n)) → measure M S *)
have "(λn. measure M (T ((1/2)^n))) ⇢ measure M S"
unfolding *[symmetric] apply (rule finite_Lim_measure_decseq, auto simp add: T_def decseq_def)
using A order.trans by blast
then have B: "eventually (λn. measure M (T ((1/2)^n)) < measure M S + epsilon) sequentially"
apply (rule order_tendstoD) using ‹epsilon > 0› by simp
have C: "eventually (λn. (1/2)^n < epsilon) sequentially"
by (rule order_tendstoD[OF _ ‹epsilon > 0›], intro tendsto_intros, auto)
obtain n where n: "(1/2)^n < epsilon" "measure M (T ((1/2)^n)) < measure M S + epsilon"
using eventually_conj[OF B C] unfolding eventually_sequentially by auto
(* the disjoint level sets admit a null one at some radius r < (1/2)^n *)
have "∃r∈{0<..<(1/2)^n}. measure M {x. infdist x S = r} = 0"
apply (rule uncountable_disjoint_family_then_exists_zero_measure, auto simp add: disjoint_family_on_def)
using uncountable_open_interval by fastforce
then obtain r where r: "r∈{0<..<(1/2)^n}" "measure M {x. infdist x S = r} = 0"
by blast
then have r2: "r > 0" "r < epsilon" using n by auto
have "measure M {x. infdist x S ≤ r} ≤ measure M {x. infdist x S ≤ (1/2)^n}"
apply (rule finite_measure_mono) using r by auto
then have "measure M {x. infdist x S ≤ r} < measure M S + epsilon"
using n(2) unfolding T_def by auto
then show ?thesis
using r(2) r2 by auto
qed
end
(* The real-valued library locale is an instance of the generalized one. *)
sublocale finite_borel_measure ⊆ finite_borel_measure'
by (standard, simp add: M_is_borel)
subsection ‹Weak-convergence.thy›
text ‹Since weak convergence is not implemented as a topology, the fact that the convergence of
a sequence implies the convergence of a subsequence is not automatic. We prove it in the lemma
below.›
(* Weak convergence passes to subsequences, since it is defined via pointwise
   convergence of the cdfs (sequential limits restrict along strict_mono r). *)
lemma weak_conv_m_subseq:
assumes "weak_conv_m M_seq M" "strict_mono r"
shows "weak_conv_m (λn. M_seq (r n)) M"
using assms LIMSEQ_subseq_LIMSEQ unfolding weak_conv_m_def weak_conv_def comp_def by auto
(* Context: a sequence of real distributions μ converging weakly to M. *)
context
fixes μ :: "nat ⇒ real measure"
and M :: "real measure"
assumes μ: "⋀n. real_distribution (μ n)"
assumes M: "real_distribution M"
assumes μ_to_M: "weak_conv_m μ M"
begin
text ‹The measure of a closed set behaves upper semicontinuously with respect to weak convergence:
if $\mu_n \to \mu$, then $\limsup \mu_n(F) \leq \mu(F)$ (and the inequality can be strict, think of
the situation where $\mu$ is a Dirac mass at $0$ and $F = \{0\}$, but $\mu_n$ has a density so that
$\mu_n(\{0\}) = 0$).›
(* Proof idea: enlarge S to a neighborhood T = {infdist x S ≤ r} whose boundary
   is M-null (possible by approx_closed_set_with_set_zero_measure_boundary);
   T is then a continuity set, so μ n T → M T < l, and μ n S ≤ μ n T. *)
lemma closed_set_weak_conv_usc:
assumes "closed S" "measure M S < l"
shows "eventually (λn. measure (μ n) S < l) sequentially"
proof (cases "S = {}")
case True
then show ?thesis
using ‹measure M S < l› by auto
next
case False
interpret real_distribution M using M by simp
define epsilon where "epsilon = l - measure M S"
have "epsilon > 0" unfolding epsilon_def using assms(2) by auto
obtain r where r: "r > 0" "r < epsilon" "measure M {x. infdist x S = r} = 0" "measure M {x. infdist x S ≤ r} < measure M S + epsilon"
using approx_closed_set_with_set_zero_measure_boundary[OF ‹closed S› ‹epsilon > 0› ‹S ≠ {}›] by blast
define T where "T = {x. infdist x S ≤ r}"
have [measurable]: "T ∈ sets borel"
unfolding T_def by auto
have "S ⊆ T"
unfolding T_def using ‹closed S› ‹r > 0› by auto
have "measure M T < l"
using r(4) unfolding T_def epsilon_def by auto
(* T has M-null boundary, hence is a continuity set for weak convergence *)
have "measure M (frontier T) ≤ measure M {x. infdist x S = r}"
apply (rule finite_measure_mono) unfolding T_def using frontier_indist_le by auto
then have "measure M (frontier T) = 0"
using ‹measure M {x. infdist x S = r} = 0› by (auto simp add: measure_le_0_iff)
then have "(λn. measure (μ n) T) ⇢ measure M T"
using μ_to_M by (simp add: μ emeasure_eq_measure real_distribution_axioms weak_conv_imp_continuity_set_conv)
then have *: "eventually (λn. measure (μ n) T < l) sequentially"
apply (rule order_tendstoD) using ‹measure M T < l› by simp
have **: "measure (μ n) S ≤ measure (μ n) T" for n
apply (rule finite_measure.finite_measure_mono)
using μ apply (simp add: finite_borel_measure.axioms(1) real_distribution.finite_borel_measure_M)
using ‹S ⊆ T› apply simp
by (simp add: μ real_distribution.events_eq_borel)
show ?thesis
apply (rule eventually_mono[OF *]) using ** le_less_trans by auto
qed
text ‹In the same way, the measure of an open set behaves lower semicontinuously with respect to
weak convergence: if $\mu_n \to \mu$, then $\liminf \mu_n(U) \geq \mu(U)$ (and the inequality can be
strict). This follows from the same statement for closed sets by passing to the complement.›
lemma open_set_weak_conv_lsc:
assumes "open S" "measure M S > l"
shows "eventually (λn. measure (μ n) S > l) sequentially"
proof -
interpret real_distribution M
using M by auto
have [measurable]: "S ∈ events" using assms(1) by auto
(* apply the closed-set statement to the complement UNIV - S *)
have "eventually (λn. measure (μ n) (UNIV - S) < 1 - l) sequentially"
apply (rule closed_set_weak_conv_usc)
using assms prob_compl[of S] by auto
moreover have "measure (μ n) (UNIV - S) = 1 - measure (μ n) S" for n
proof -
interpret mu: real_distribution "μ n"
using μ by auto
have "S ∈ mu.events" using assms(1) by auto
then show ?thesis using mu.prob_compl[of S] by auto
qed
ultimately show ?thesis by auto
qed
end
end
Theory ME_Library_Complement
theory ME_Library_Complement
imports "HOL-Analysis.Analysis"
begin
subsection ‹The trivial measurable space›
text ‹
The trivial measurable space is the smallest possible ‹σ›-algebra, i.e. only the empty set
and everything.
›
(* The sigma-algebra generated by {{}, X} on carrier X: the coarsest
   measurable-space structure on X. *)
definition trivial_measure :: "'a set ⇒ 'a measure" where
"trivial_measure X = sigma X {{}, X}"
(* The carrier of the trivial measurable space is X itself. *)
lemma space_trivial_measure [simp]: "space (trivial_measure X) = X"
by (simp add: trivial_measure_def)
(* {{}, X} is already a sigma-algebra, so generating adds nothing. *)
lemma sets_trivial_measure: "sets (trivial_measure X) = {{}, X}"
by (simp add: trivial_measure_def sigma_algebra_trivial sigma_algebra.sigma_sets_eq)
(* Sufficient condition for measurability into the trivial space: it is enough
   that f maps into X and the preimage of X (the only nontrivial set) is
   measurable. *)
lemma measurable_trivial_measure:
assumes "f ∈ space M → X" and "f -` X ∩ space M ∈ sets M"
shows "f ∈ M →⇩M trivial_measure X"
using assms unfolding measurable_def by (auto simp: sets_trivial_measure)
(* The previous condition is in fact also necessary. *)
lemma measurable_trivial_measure_iff:
"f ∈ M →⇩M trivial_measure X ⟷ f ∈ space M → X ∧ f -` X ∩ space M ∈ sets M"
unfolding measurable_def by (auto simp: sets_trivial_measure)
subsection ‹Pullback algebras›
text ‹
The pullback algebra $f^{-1}(\Sigma)$ of a ‹σ›-algebra $(\Omega, \Sigma)$ is the smallest
‹σ›-algebra such that $f$ is $f^{-1}(\Sigma)--\Sigma$-measurable.
›
(* Pullback of the ambient sigma-algebra M along f, on the new carrier Ω':
   generated by the preimages f -` A ∩ Ω' for A ∈ M. *)
definition (in sigma_algebra) pullback_algebra :: "('b ⇒ 'a) ⇒ 'b set ⇒ 'b set set" where
"pullback_algebra f Ω' = sigma_sets Ω' {f -` A ∩ Ω' |A. A ∈ M}"
(* Minimality: whenever f is M-N-measurable, the pullback of N along f is
   contained in sets M.  Proved by induction over the sigma_sets closure. *)
lemma pullback_algebra_minimal:
assumes "f ∈ M →⇩M N"
shows "sets.pullback_algebra N f (space M) ⊆ sets M"
proof
fix X assume "X ∈ sets.pullback_algebra N f (space M)"
thus "X ∈ sets M"
unfolding sets.pullback_algebra_def
by induction (use assms in ‹auto simp: measurable_def›)
qed
(* Each generating preimage belongs to the pullback algebra. *)
lemma (in sigma_algebra) in_pullback_algebra: "A ∈ M ⟹ f -` A ∩ Ω' ∈ pullback_algebra f Ω'"
unfolding pullback_algebra_def by (rule sigma_sets.Basic) auto
end
Theory Fekete
section ‹Subadditive and submultiplicative sequences›
theory Fekete
imports "HOL-Analysis.Multivariate_Analysis"
begin
text ‹A real sequence is subadditive if $u_{n+m} \leq u_n+u_m$. This implies the
convergence of $u_n/n$ to $Inf\{u_n/n\} \in [-\infty, +\infty)$, a useful result known
as Fekete lemma. We prove it below.
Taking logarithms, the same result applies to submultiplicative sequences. We illustrate
it with the definition of the spectral radius as the limit of $\|x^n\|^{1/n}$, the
convergence following from Fekete lemma.›
subsection ‹Subadditive sequences›
text ‹We define subadditive sequences, either from the start or eventually.›
(* u is subadditive when u (m+n) ≤ u m + u n for all m, n. *)
definition subadditive::"(nat⇒real) ⇒ bool"
where "subadditive u = (∀m n. u (m+n) ≤ u m + u n)"
(* Introduction rule for subadditivity. *)
lemma subadditiveI:
assumes "⋀m n. u (m+n) ≤ u m + u n"
shows "subadditive u"
unfolding subadditive_def using assms by auto
(* Destruction rule for subadditivity. *)
lemma subadditiveD:
assumes "subadditive u"
shows "u (m+n) ≤ u m + u n"
using assms unfolding subadditive_def by auto
(* For a subadditive sequence, u n ≤ n · u 1 for n > 0 (iterate subadditivity
   with steps of size 1). *)
lemma subadditive_un_le_nu1:
assumes "subadditive u"
"n > 0"
shows "u n ≤ n * u 1"
proof -
(* strengthen to an induction-friendly statement covering n = 0 trivially *)
have *: "n = 0 ∨ (u n ≤ n * u 1)" for n
proof (induction n)
case 0
then show ?case by auto
next
case (Suc n)
consider "n = 0" | "n > 0" by auto
then show ?case
proof (cases)
case 1
then show ?thesis by auto
next
case 2
then have "u (Suc n) ≤ u n + u 1" using subadditiveD[OF assms(1), of n 1] by auto
then show ?thesis using Suc.IH 2 by (auto simp add: algebra_simps)
qed
qed
show ?thesis using *[of n] ‹n > 0› by auto
qed
(* Subadditivity required only for indices above a threshold N0. *)
definition eventually_subadditive::"(nat⇒real) ⇒ nat ⇒ bool"
where "eventually_subadditive u N0 = (∀m>N0. ∀n>N0. u (m+n) ≤ u m + u n)"
(* Introduction rule for eventual subadditivity. *)
lemma eventually_subadditiveI:
assumes "⋀m n. m > N0 ⟹ n > N0 ⟹ u (m+n) ≤ u m + u n"
shows "eventually_subadditive u N0"
unfolding eventually_subadditive_def using assms by auto
(* A subadditive sequence is in particular eventually subadditive (N0 = 0). *)
lemma subadditive_imp_eventually_subadditive:
assumes "subadditive u"
shows "eventually_subadditive u 0"
using assms unfolding subadditive_def eventually_subadditive_def by auto
text ‹The main inequality that will lead to convergence is given in the next lemma:
given $n$, then eventually $u_m/m$ is bounded by $u_n/n$, up to an arbitrarily small error.
This is proved by doing the euclidean division of $m$ by $n$ and using the subadditivity.
(the remainder in the euclidean division will give the error term.)›
(* Core inequality of Fekete's lemma: for fixed n > N0 and any e > 0,
   eventually u m / m < u n / n + e.  Write m = a·n + r by euclidean division
   (with the remainder shifted into [n, 2n) so it stays > N0), iterate
   subadditivity a times, and absorb the remainder into an error term 3C/m. *)
lemma eventually_subadditive_ineq:
assumes "eventually_subadditive u N0" "e>0" "n>N0"
shows "∃N>N0. ∀m≥N. u m/m < u n/n + e"
proof -
(* iterating subadditivity: u (a·n + r) ≤ a·u n + u r, by induction on a *)
have ineq_rec: "u(a*n+r) ≤ a * u n + u r" if "n>N0" "r>N0" for a n r
proof (induct a)
case (Suc a)
have "a*n+r>N0" using ‹r>N0› by simp
have "u((Suc a)*n+r) = u(a*n+r+n)" by (simp add: algebra_simps)
also have "... ≤ u(a*n+r)+u n" using assms ‹n>N0› ‹a*n+r>N0› eventually_subadditive_def by blast
also have "... ≤ a*u n + u r + u n" by (simp add: Suc.hyps)
also have "... = (Suc a) * u n + u r" by (simp add: algebra_simps)
finally show ?case by simp
qed (simp)
have "n>0" "real n > 0" using ‹n>N0› by auto
(* C bounds |u i| for all indices up to 2n, covering every possible remainder *)
define C where "C = Max {abs(u i) |i. i≤2*n}"
have ineq_C: "abs(u i) ≤ C" if "i ≤ 2 * n" for i
unfolding C_def by (intro Max_ge, auto simp add: that)
have ineq_all_m: "u m/m ≤ u n/n + 3*C/m" if "m≥n" for m
proof -
have "real m>0" using ‹m≥n› ‹0 < real n› by linarith
obtain a0 r0 where "r0<n" "m = a0*n+r0"
using ‹0 < n› mod_div_decomp mod_less_divisor by blast
(* shift the remainder into [n, 2n) so that r > N0, at the cost of one factor n *)
define a where "a = a0-1"
define r where "r = r0+n"
have "r<2*n" "r≥n" unfolding r_def by (auto simp add: ‹r0<n›)
have "a0>0" using ‹m = a0*n + r0› ‹n ≤ m› ‹r0 < n› not_le by fastforce
then have "m = a * n + r" using a_def r_def ‹m = a0*n+r0› mult_eq_if by auto
then have real_eq: "-r = real n * a - m" by simp
have "r>N0" using ‹r≥n› ‹n>N0› by simp
then have "u m ≤ a * u n + u r" using ineq_rec ‹m = a*n+r› ‹n>N0› by simp
then have "n * u m ≤ n * (a * u n + u r)" using ‹real n>0› by simp
then have "n * u m - m * u n ≤ -r * u n + n * u r"
unfolding real_eq by (simp add: algebra_simps)
also have "... ≤ r * abs(u n) + n * abs(u r)"
apply (intro add_mono mult_left_mono) using real_0_le_add_iff by fastforce+
also have "... ≤ (2 * n) * C + n * C"
apply (intro add_mono mult_mono ineq_C) using less_imp_le[OF ‹r < 2 * n›] by auto
finally have "n * u m - m * u n ≤ 3*C*n" by auto
then show "u m/m ≤ u n/n + 3*C/m"
using ‹0 < real n› ‹0 < real m› by (simp add: divide_simps mult.commute)
qed
(* choose N large enough that the error term 3C/m drops below e *)
obtain M::nat where M: "M ≥ 3 * C / e" using real_nat_ceiling_ge by auto
define N where "N = M + n + N0 + 1"
have "N > 3 * C / e" "N ≥ n" "N > N0" unfolding N_def using M by auto
have "u m/m < u n/n + e" if "m ≥ N" for m
proof -
have "3 * C / m < e"
using that ‹N > 3 * C / e› ‹e > 0› apply (auto simp add: algebra_simps divide_simps)
by (meson le_less_trans linorder_not_le mult_less_cancel_left_pos of_nat_less_iff)
then show ?thesis using ineq_all_m[of m] ‹n ≤ N› ‹N ≤ m› by auto
qed
then show ?thesis using ‹N0 < N› by blast
qed
text ‹From the inequality above, we deduce the convergence of $u_n/n$ to its infimum. As this
infimum might be $-\infty$, we formulate this convergence in the extended reals. Then, we
specialize it to the real situation, separating the cases where $u_n/n$ is bounded below or not.›
(* Fekete's lemma in the extended reals: for an eventually subadditive sequence,
   u m / m converges (in ereal) to the infimum of the u n / n, n > N0.  The
   lower bound is by definition of Inf; the upper bound uses
   eventually_subadditive_ineq applied to a near-minimizer. *)
lemma subadditive_converges_ereal':
assumes "eventually_subadditive u N0"
shows "(λm. ereal(u m/m)) ⇢ Inf {ereal(u n/n) | n. n>N0}"
proof -
define v where "v = (λm. ereal(u m/m))"
define V where "V = {v n | n. n>N0}"
define l where "l = Inf V"
have "⋀t. t∈V ⟹ t≥l" by (simp add: Inf_lower l_def)
then have "v n ≥ l" if "n > N0" for n using V_def that by blast
then have lower: "eventually (λn. a < v n) sequentially" if "a < l" for a
by (meson that dual_order.strict_trans1 eventually_at_top_dense)
(* for a > l, find n with u n / n + e < a and apply the core inequality *)
have upper: "eventually (λn. a > v n) sequentially" if "a > l" for a
proof -
obtain t where "t∈V" "t<a" by (metis ‹a>l› Inf_greatest l_def not_le)
then obtain e::real where "e>0" "t+e < a" by (meson ereal_le_epsilon2 leD le_less_linear)
obtain n where "n>N0" "t = u n/n" using V_def v_def ‹t ∈ V› by blast
then have "u n/n + e < a" using ‹t+e < a› by simp
obtain N where "∀m≥N. u m/m < u n/n + e"
using eventually_subadditive_ineq[OF assms] ‹0 < e› ‹N0 < n› by blast
then have "u m/m < a" if "m ≥ N" for m
using that ‹u n/n + e < a› less_ereal.simps(1) less_trans by blast
then have "v m< a" if "m ≥ N" for m using v_def that by blast
then show ?thesis using eventually_at_top_linorder by auto
qed
show ?thesis
using lower upper unfolding V_def l_def v_def by (simp add: order_tendsto_iff)
qed
(* Specialization of the ereal Fekete lemma to genuinely subadditive sequences. *)
lemma subadditive_converges_ereal:
assumes "subadditive u"
shows "(λm. ereal(u m/m)) ⇢ Inf {ereal(u n/n) | n. n>0}"
by (rule subadditive_converges_ereal'[OF subadditive_imp_eventually_subadditive[OF assms]])
(* Real-valued Fekete lemma when the quotients are bounded below: the ereal
   infimum is then finite and the convergence transfers to the reals. *)
lemma subadditive_converges_bounded':
assumes "eventually_subadditive u N0"
"bdd_below {u n/n | n. n>N0}"
shows "(λn. u n/n) ⇢ Inf {u n/n | n. n>N0}"
proof-
have *: "(λn. ereal(u n /n)) ⇢ Inf {ereal(u n/n)|n. n > N0}"
by (simp add: assms(1) subadditive_converges_ereal')
define V where "V = {u n/n | n. n>N0}"
have a: "bdd_below V" "V≠{}" by (auto simp add: V_def assms(2))
(* the ereal Inf of a bounded nonempty real set is the ereal of the real Inf *)
have "Inf {ereal(t)| t. t∈V} = ereal(Inf V)" by (subst ereal_Inf'[OF a], simp add: Setcompr_eq_image)
moreover have "{ereal(t)| t. t∈V} = {ereal(u n/n)|n. n > N0}" using V_def by blast
ultimately have "Inf {ereal(u n/n)|n. n > N0} = ereal(Inf {u n/n |n. n > N0})" using V_def by auto
then have "(λn. ereal(u n /n)) ⇢ ereal(Inf {u n/n | n. n>N0})" using * by auto
then show ?thesis by simp
qed
(* Bounded-below version specialized to genuinely subadditive sequences. *)
lemma subadditive_converges_bounded:
assumes "subadditive u"
"bdd_below {u n/n | n. n>0}"
shows "(λn. u n/n) ⇢ Inf {u n/n | n. n>0}"
by (rule subadditive_converges_bounded'[OF subadditive_imp_eventually_subadditive[OF assms(1)] assms(2)])
text ‹We reformulate the previous lemma in a more directly usable form, avoiding the infimum.›
(* Usable reformulation without the infimum: a subadditive sequence with
   u n ≥ n·a has a limit l of the quotients, and u n ≥ n·l for all n > 0. *)
lemma subadditive_converges_bounded'':
assumes "subadditive u"
"⋀n. n > 0 ⟹ u n ≥ n * (a::real)"
shows "∃l. (λn. u n / n) ⇢ l ∧ (∀n>0. u n ≥ n * l)"
proof -
(* the linear lower bound makes the quotients bounded below by a *)
have B: "bdd_below {u n/n | n. n>0}"
apply (rule bdd_belowI[of _ a]) using assms(2)
apply (auto simp add: divide_simps)
apply (metis mult.commute mult_left_le_imp_le of_nat_0_less_iff)
done
define l where "l = Inf {u n/n | n. n>0}"
have *: "u n / n ≥ l" if "n > 0" for n
unfolding l_def using that by (auto intro!: cInf_lower[OF _ B])
show ?thesis
apply (rule exI[of _ l], auto)
using subadditive_converges_bounded[OF assms(1) B] apply (simp add: l_def)
using * by (simp add: divide_simps algebra_simps)
qed
(* If the quotients are not bounded below, their ereal infimum is -∞ and the
   sequence u n / n tends to -∞. *)
lemma subadditive_converges_unbounded':
assumes "eventually_subadditive u N0"
"¬ (bdd_below {u n/n | n. n>N0})"
shows "(λn. ereal(u n/n)) ⇢ -∞"
proof -
have *: "(λn. ereal(u n /n)) ⇢ Inf {ereal(u n/n)|n. n > N0}"
by (simp add: assms(1) subadditive_converges_ereal')
define V where "V = {u n/n | n. n>N0}"
then have "¬ bdd_below V" using assms by simp
(* an unbounded-below real set has ereal infimum -∞ *)
have "Inf {ereal(t) | t. t∈V} = -∞"
by (rule ereal_bot, metis (mono_tags, lifting) ‹¬ bdd_below V› bdd_below_def
leI Inf_lower2 ereal_less_eq(3) le_less mem_Collect_eq)
moreover have "{ereal(t)| t. t∈V} = {ereal(u n/n)|n. n > N0}" using V_def by blast
ultimately have "Inf {ereal(u n/n)|n. n > N0} = -∞" by auto
then show ?thesis using * by simp
qed
(* Unbounded-below version specialized to genuinely subadditive sequences. *)
lemma subadditive_converges_unbounded:
assumes "subadditive u"
"¬ (bdd_below {u n/n | n. n>0})"
shows "(λn. ereal(u n/n)) ⇢ -∞"
by (rule subadditive_converges_unbounded'[OF subadditive_imp_eventually_subadditive[OF assms(1)] assms(2)])
subsection ‹Superadditive sequences›
text ‹While most applications involve subadditive sequences, one sometimes encounters superadditive
sequences. We reformulate quickly some of the above results in this setting.›
(* u is superadditive when u (m+n) ≥ u m + u n for all m, n. *)
definition superadditive::"(nat⇒real) ⇒ bool"
where "superadditive u = (∀m n. u (m+n) ≥ u m + u n)"
(* Superadditivity of u is subadditivity of -u; this is how the subadditive
   results are transported below. *)
lemma subadditive_of_superadditive:
assumes "superadditive u"
shows "subadditive (λn. -u n)"
using assms unfolding superadditive_def subadditive_def by (auto simp add: algebra_simps)
(* Dual of subadditive_un_le_nu1: u n ≥ n · u 1 for a superadditive sequence. *)
lemma superadditive_un_ge_nu1:
assumes "superadditive u"
"n > 0"
shows "u n ≥ n * u 1"
using subadditive_un_le_nu1[OF subadditive_of_superadditive[OF assms(1)] assms(2)] by auto
(* Dual of subadditive_converges_bounded'': a superadditive sequence with
   u n ≤ n·a has converging quotients, with limit l satisfying u n ≤ n·l.
   Obtained by negation from the subadditive statement. *)
lemma superadditive_converges_bounded'':
assumes "superadditive u"
"⋀n. n > 0 ⟹ u n ≤ n * (a::real)"
shows "∃l. (λn. u n / n) ⇢ l ∧ (∀n>0. u n ≤ n * l)"
proof -
have "∃l. (λn. -u n / n) ⇢ l ∧ (∀n>0. -u n ≥ n * l)"
apply (rule subadditive_converges_bounded''[OF subadditive_of_superadditive[OF assms(1)], of "-a"])
using assms(2) by auto
then obtain l where l: "(λn. -u n / n) ⇢ l" "(∀n>0. -u n ≥ n * l)" by blast
(* undo the negation: the quotients of u converge to -l *)
have "(λn. -((-u n)/n)) ⇢ -l"
by (intro tendsto_intros l)
moreover have "∀n>0. u n ≤ n * (-l)"
using l(2) by (auto simp add: algebra_simps) (metis minus_equation_iff neg_le_iff_le)
ultimately show ?thesis
by auto
qed
subsection ‹Almost additive sequences›
text ‹One often encounters sequences which are both subadditive and superadditive, but only up
to an additive constant. Adding or subtracting this constant, one can make the sequence
genuinely subadditive or superadditive, and thus deduce results about its convergence, as follows.
Such sequences appear notably when dealing with quasimorphisms.›
(* Almost-additive (quasimorphism-like) sequences: |u(m+n) - u m - u n| ≤ C.
   Then v = u + C is subadditive and w = u - C is superadditive; both quotient
   sequences converge, to the same limit, so u n / n converges, and every
   u k stays within C of k times the limit. *)
lemma almost_additive_converges:
fixes u::"nat ⇒ real"
assumes "⋀m n. abs(u(m+n) - u m - u n) ≤ C"
shows "convergent (λn. u n/n)"
"abs(u k - k * lim (λn. u n / n)) ≤ C"
proof -
(* C ≥ |u 0| ≥ 0, from the hypothesis at m = n = 0 *)
have "(abs (u 0)) ≤ C" using assms[of 0 0] by auto
then have "C ≥ 0" by auto
define v where "v = (λn. u n + C)"
have "subadditive v"
unfolding subadditive_def v_def using assms by (auto simp add: algebra_simps abs_diff_le_iff)
then have vle: "v n ≤ n * v 1" if "n > 0" for n
using subadditive_un_le_nu1 that by auto
define w where "w = (λn. u n - C)"
have "superadditive w"
unfolding superadditive_def w_def using assms by (auto simp add: algebra_simps abs_diff_le_iff)
then have wge: "w n ≥ n * w 1" if "n > 0" for n
using superadditive_un_ge_nu1 that by auto
(* w ≤ v gives the linear bounds needed for the two convergence lemmas *)
have I: "v n ≥ w n" for n
unfolding v_def w_def using ‹C ≥ 0› by auto
then have *: "v n ≥ n * w 1" if "n > 0" for n using order_trans[OF wge[OF that]] by auto
then obtain lv where lv: "(λn. v n/n) ⇢ lv" "⋀n. n > 0 ⟹ v n ≥ n * lv"
using subadditive_converges_bounded''[OF ‹subadditive v› *] by auto
have *: "w n ≤ n * v 1" if "n > 0" for n using order_trans[OF _ vle[OF that]] I by auto
then obtain lw where lw: "(λn. w n/n) ⇢ lw" "⋀n. n > 0 ⟹ w n ≤ n * lw"
using superadditive_converges_bounded''[OF ‹superadditive w› *] by auto
(* v/n and w/n differ by 2C/n → 0, hence share the same limit *)
have *: "v n/n = w n /n + 2*C*(1/n)" for n
unfolding v_def w_def by (auto simp add: algebra_simps divide_simps)
have "(λn. w n /n + 2*C*(1/n)) ⇢ lw + 2*C*0"
by (intro tendsto_add tendsto_mult lim_1_over_n lw, auto)
then have "lw = lv"
unfolding *[symmetric] using lv(1) LIMSEQ_unique by auto
have *: "u n/n = w n /n + C*(1/n)" for n
unfolding w_def by (auto simp add: algebra_simps divide_simps)
have "(λn. u n /n) ⇢ lw + C*0"
unfolding * by (intro tendsto_add tendsto_mult lim_1_over_n lw, auto)
then have lu: "convergent (λn. u n/n)" "lim (λn. u n/n) = lw"
by (auto simp add: convergentI limI)
then show "convergent (λn. u n/n)" by simp
(* the two one-sided linear bounds squeeze u k within C of k·lim *)
show "abs(u k - k * lim (λn. u n / n)) ≤ C"
proof (cases "k>0")
case False
then show ?thesis using assms[of 0 0] by auto
next
case True
have "u k - k * lim (λn. u n/n) = v k - C - k * lv" unfolding lu(2) ‹lw = lv› v_def by auto
also have "... ≥ -C" using lv(2)[OF True] by auto
finally have A: "u k - k * lim (λn. u n/n) ≥ - C" by simp
have "u k - k * lim (λn. u n/n) = w k + C - k * lw" unfolding lu(2) w_def by auto
also have "... ≤ C" using lw(2)[OF True] by auto
finally show ?thesis using A by auto
qed
qed
subsection ‹Submultiplicative sequences, application to the spectral radius›
text ‹In the same way as subadditive sequences, one may define submultiplicative sequences.
Essentially, a sequence is submultiplicative if its logarithm is subadditive. A difference is
that we allow a submultiplicative sequence to take the value $0$, as this shows up in applications.
This implies that we have to distinguish in the proofs the situations where the value $0$
is taken or not. In the latter situation, we can use directly the results from the
subadditive case to deduce convergence. In the former situation, convergence to $0$ is obvious
as the sequence vanishes eventually.›
(* Fekete-style lemma for submultiplicative sequences: if u ≥ 0 and u(m+n) ≤ u m * u n,
   then root n (u n) converges to the infimum of {root n (u n) | n > 0}. Two cases:
   if u vanishes somewhere, it vanishes eventually and the limit is 0; otherwise take
   logarithms and reduce to the subadditive case. *)
lemma submultiplicative_converges:
fixes u::"nat⇒real"
assumes "⋀n. u n ≥ 0"
"⋀m n. u (m+n) ≤ u m * u n"
shows "(λn. root n (u n))⇢ Inf {root n (u n) | n. n>0}"
proof -
define v where "v = (λ n. root n (u n))"
define V where "V = {v n | n. n>0}"
then have "V ≠ {}" by blast
have "t ≥ 0" if "t ∈ V" for t using that V_def v_def assms(1) by auto
then have "Inf V ≥ 0" by (simp add: ‹V ≠ {}› cInf_greatest)
have "bdd_below V" by (meson ‹⋀t. t ∈ V ⟹ 0 ≤ t› bdd_below_def)
show ?thesis
proof cases
(* Degenerate case: u vanishes at some n, hence (by submultiplicativity) at all m ≥ n,
   so v is eventually 0 and Inf V = 0. *)
assume "∃n. u n = 0"
then obtain n where "u n = 0" by auto
then have "u m = 0" if "m ≥ n" for m by (metis that antisym_conv assms(1) assms(2) le_Suc_ex mult_zero_left)
then have *: "v m = 0" if "m ≥ n" for m using v_def that by simp
then have "v ⇢ 0" using lim_explicit by force
have "v (Suc n) ∈ V" using V_def by blast
moreover have "v (Suc n) = 0" using * by auto
ultimately have "Inf V ≤ 0" by (simp add: ‹bdd_below V› cInf_lower)
then have "Inf V = 0" using ‹0 ≤ Inf V› by auto
then show ?thesis using V_def v_def ‹v ⇢ 0› by auto
next
(* Main case: u is everywhere positive; its logarithm w is subadditive. *)
assume "¬ (∃n. u n = 0)"
then have "u n > 0" for n by (metis assms(1) less_eq_real_def)
define w where "w n = ln (u n)" for n
(* Key identity relating the n-th root of u n to exp(w n / n). *)
have express_vn: "v n = exp(w n/n)" if "n>0" for n
proof -
have "(exp(w n/n))^n = exp(n*(w n/n))" by (metis exp_of_nat_mult)
also have "... = exp(w n)" by (simp add: ‹0 < n›)
also have "... = u n" by (simp add: ‹⋀n. 0 < u n› w_def)
finally have "exp(w n/n) = root n (u n)" by (metis ‹0 < n› exp_ge_zero real_root_power_cancel)
then show ?thesis unfolding v_def by simp
qed
have "eventually_subadditive w 0"
proof (rule eventually_subadditiveI)
fix m n
have "w (m+n) = ln (u (m+n))" by (simp add: w_def)
also have "... ≤ ln(u m * u n)"
by (meson ‹⋀n. 0 < u n› assms(2) zero_less_mult_iff ln_le_cancel_iff)
also have "... = ln(u m) + ln(u n)" by (simp add: ‹⋀n. 0 < u n› ln_mult)
also have "... = w m + w n" by (simp add: w_def)
finally show "w (m+n) ≤ w m + w n".
qed
(* Convergence to l = Inf V is shown via order_tendsto_iff: v is eventually above
   every a < l (trivially, since l is a lower bound) ... *)
define l where "l = Inf V"
then have "v n≥l" if "n > 0" for n
using V_def that by (metis (mono_tags, lifting) ‹bdd_below V› cInf_lower mem_Collect_eq)
then have lower: "eventually (λn. a < v n) sequentially" if "a < l" for a
by (meson that dual_order.strict_trans1 eventually_at_top_dense)
(* ... and eventually below every a > l, using the subadditive estimate on w. *)
have upper: "eventually (λn. a > v n) sequentially" if "a > l" for a
proof -
obtain t where "t∈V" "t < a" using ‹V ≠ {}› cInf_lessD l_def ‹a>l› by blast
then have "t > 0" using V_def ‹⋀n. 0 < u n› v_def by auto
then have "a/t > 1" using ‹t<a› by simp
define e where "e = ln(a/t)/2"
have "e > 0" "e < ln(a/t)" unfolding e_def by (simp_all add: ‹1 < a / t› ln_gt_zero)
then have "exp(e) < a/t" by (metis ‹1 < a / t› exp_less_cancel_iff exp_ln less_trans zero_less_one)
obtain n where "n>0" "t = v n" using V_def v_def ‹t ∈ V› by blast
with ‹0 < t› have "v n * exp(e) < a" using ‹exp(e) < a/t›
by (auto simp add: field_simps)
obtain N where *: "N>0" "⋀m. m≥N ⟹ w m/m < w n/n + e"
using eventually_subadditive_ineq[OF ‹eventually_subadditive w 0›] ‹0 < n› ‹e>0› by blast
have "v m < a" if "m ≥ N" for m
proof -
have "m>0" using that ‹N>0› by simp
have "w m/m < w n/n + e" by (simp add: ‹N ≤ m› *)
then have "exp(w m/m) < exp(w n/n + e)" by simp
also have "... = exp(w n/n) * exp(e)" by (simp add: mult_exp_exp)
finally have "v m < v n * exp(e)" using express_vn ‹m>0› ‹n>0› by simp
then show "v m < a" using ‹v n * exp(e) < a› by simp
qed
then show ?thesis using eventually_at_top_linorder by auto
qed
show ?thesis
using lower upper unfolding v_def l_def V_def by (simp add: order_tendsto_iff)
qed
qed
text ‹An important application of submultiplicativity is to prove the existence of the
spectral radius of a matrix, as the limit of $\|A^n\|^{1/n}$.›
(* The spectral radius of x, defined as the infimum of the n-th roots of the norms of
   its powers; by spectral_radius_limit below this infimum is also the limit. *)
definition spectral_radius::"'a::real_normed_algebra_1 ⇒ real"
where "spectral_radius x = Inf {root n (norm(x^n))| n. n>0}"
(* Bookkeeping facts about the set V = {root n (norm(x^n)) | n > 0}: it is nonempty,
   consists of nonnegative reals, is bounded below, and spectral_radius x = Inf V
   bounds its elements from below. *)
lemma spectral_radius_aux:
fixes x::"'a::real_normed_algebra_1"
defines "V ≡ {root n (norm(x^n))| n. n>0}"
shows "⋀t. t∈V ⟹ t ≥ spectral_radius x"
"⋀t. t∈V ⟹ t ≥ 0"
"bdd_below V"
"V ≠ {}"
"Inf V ≥ 0"
proof -
show "V≠{}" using V_def by blast
show *: "t ≥ 0" if "t ∈ V" for t using that unfolding V_def using real_root_pos_pos_le by auto
then show "bdd_below V" by (meson bdd_below_def)
then show "Inf V ≥ 0" by (simp add: ‹V ≠ {}› * cInf_greatest)
show "⋀t. t∈V ⟹ t ≥ spectral_radius x" by (metis (mono_tags, lifting) ‹bdd_below V› assms cInf_lower spectral_radius_def)
qed
(* The spectral radius is nonnegative, being the infimum of a set of nonnegative reals. *)
lemma spectral_radius_nonneg [simp]:
"spectral_radius x ≥ 0"
by (simp add: spectral_radius_aux(5) spectral_radius_def)
(* (spectral_radius x)^n ≤ norm(x^n) for every n; the n = 0 case is trivial since
   both sides are then handled by the final (simp). *)
lemma spectral_radius_upper_bound [simp]:
"(spectral_radius x)^n ≤ norm(x^n)"
proof (cases)
assume "¬(n = 0)"
have "root n (norm(x^n)) ≥ spectral_radius x"
using spectral_radius_aux ‹n ≠ 0› by auto
then show ?thesis
by (metis ‹n ≠ 0› spectral_radius_nonneg norm_ge_zero not_gr0 power_mono real_root_pow_pos2)
qed (simp)
(* Gelfand-type formula: root n (norm(x^n)) converges to the spectral radius, as an
   instance of submultiplicative_converges (norm is submultiplicative on powers). *)
lemma spectral_radius_limit:
"(λn. root n (norm(x^n))) ⇢ spectral_radius x"
proof -
have "norm(x^(m+n)) ≤ norm(x^m) * norm(x^n)" for m n by (simp add: power_add norm_mult_ineq)
then show ?thesis unfolding spectral_radius_def using submultiplicative_converges by auto
qed
end
Theory Asymptotic_Density
section ‹Asymptotic densities›
theory Asymptotic_Density
imports SG_Library_Complement
begin
text ‹The upper asymptotic density of a subset $A$ of the integers is
$\limsup Card(A \cap [0,n)) / n \in [0,1]$. It measures how big a set of integers is
at some (arbitrarily large) times. In this paragraph, we establish the basic properties of this notion.
There is a corresponding notion of lower asymptotic density, with a liminf instead
of a limsup, measuring how big a set is at all times. The corresponding properties
are proved exactly in the same way.
›
subsection ‹Upper asymptotic densities›
text ‹As limsups are only defined for sequences taking values in a complete lattice
(here the extended reals), we define it in the extended reals and then go back to the reals.
This is a little bit artificial, but it is not a real problem as in the applications we
will never come back to this definition.›
(* Upper asymptotic density of A ⊆ ℕ: the limsup of card(A ∩ [0,n)) / n, computed in
   the extended reals (where limsup always exists) and projected back to the reals. *)
definition upper_asymptotic_density::"nat set ⇒ real"
where "upper_asymptotic_density A = real_of_ereal(limsup (λn. card(A ∩ {..<n})/n))"
text ‹First basic property: the asymptotic density is between $0$ and $1$.›
(* The limsup defining the upper density is finite and lies in [0,1]; consequently the
   real-valued density equals the ereal limsup and is itself in [0,1]. *)
lemma upper_asymptotic_density_in_01:
"ereal(upper_asymptotic_density A) = limsup (λn. card(A ∩ {..<n})/n)"
"upper_asymptotic_density A ≤ 1"
"upper_asymptotic_density A ≥ 0"
proof -
{
fix n::nat assume "n>0"
have "card(A ∩ {..<n}) ≤ n" by (metis card_lessThan Int_lower2 card_mono finite_lessThan)
then have "card(A ∩ {..<n}) / n ≤ ereal 1" using ‹n>0› by auto
}
then have "eventually (λn. card(A ∩ {..<n}) / n ≤ ereal 1) sequentially"
by (simp add: eventually_at_top_dense)
then have a: "limsup (λn. card(A ∩ {..<n})/n) ≤ 1" by (simp add: Limsup_const Limsup_bounded)
have "card(A ∩ {..<n}) / n ≥ ereal 0" for n by auto
then have "liminf (λn. card(A ∩ {..<n})/n) ≥ 0" by (simp add: le_Liminf_iff less_le_trans)
then have b: "limsup (λn. card(A ∩ {..<n})/n) ≥ 0" by (meson Liminf_le_Limsup order_trans sequentially_bot)
(* Finiteness of the limsup lets real_of_ereal round-trip without loss. *)
have "abs(limsup (λn. card(A ∩ {..<n})/n)) ≠ ∞" using a b by auto
then show "ereal(upper_asymptotic_density A) = limsup (λn. card(A ∩ {..<n})/n)"
unfolding upper_asymptotic_density_def by auto
show "upper_asymptotic_density A ≤ 1" "upper_asymptotic_density A ≥ 0" unfolding upper_asymptotic_density_def
using a b by (auto simp add: real_of_ereal_le_1 real_of_ereal_pos)
qed
text ‹The two next propositions give the usable characterization of the asymptotic density, in
terms of the eventual cardinality of $A \cap [0, n)$. Note that the inequality is strict for one
implication and large for the other.›
(* Elimination rule: if the upper density is < l, then card(A ∩ [0,n)) < l*n for all
   large n. Note the strict inequality in both hypothesis and conclusion. *)
proposition upper_asymptotic_densityD:
fixes l::real
assumes "upper_asymptotic_density A < l"
shows "eventually (λn. card(A ∩ {..<n}) < l * n) sequentially"
proof -
have "limsup (λn. card(A ∩ {..<n})/n) < l"
using assms upper_asymptotic_density_in_01(1) ereal_less_ereal_Ex by auto
then have "eventually (λn. card(A ∩ {..<n})/n < ereal l) sequentially"
using Limsup_lessD by blast
(* Conjoin n > 0 so that the division by n can be cleared. *)
then have "eventually (λn. card(A ∩ {..<n})/n < ereal l ∧ n > 0) sequentially"
using eventually_gt_at_top eventually_conj by blast
moreover have "card(A ∩ {..<n}) < l * n" if "card(A ∩ {..<n})/n < ereal l ∧ n > 0" for n
using that by (simp add: divide_less_eq)
ultimately show "eventually (λn. card(A ∩ {..<n}) < l * n) sequentially"
by (simp add: eventually_mono)
qed
(* Introduction rule: an eventual bound card(A ∩ [0,n)) ≤ l*n yields upper density ≤ l.
   This uses the large inequality, dual to upper_asymptotic_densityD. *)
proposition upper_asymptotic_densityI:
fixes l::real
assumes "eventually (λn. card(A ∩ {..<n}) ≤ l * n) sequentially"
shows "upper_asymptotic_density A ≤ l"
proof -
have "eventually (λn. card(A ∩ {..<n}) ≤ l * n ∧ n > 0) sequentially"
using assms eventually_gt_at_top eventually_conj by blast
moreover have "card(A ∩ {..<n})/n ≤ ereal l" if "card(A ∩ {..<n}) ≤ l * n ∧ n > 0" for n
using that by (simp add: divide_le_eq)
ultimately have "eventually (λn. card(A ∩ {..<n})/n ≤ ereal l) sequentially"
by (simp add: eventually_mono)
then have "limsup (λn. card(A ∩ {..<n})/n) ≤ ereal l"
by (simp add: Limsup_bounded)
then have "ereal(upper_asymptotic_density A) ≤ ereal l"
using upper_asymptotic_density_in_01(1) by auto
then show ?thesis by (simp del: upper_asymptotic_density_in_01)
qed
text ‹The following trivial lemma is useful to control the asymptotic density of unions.›
(* Any l strictly above x + y splits as lx + ly with lx > x and ly > y (distribute the
   slack (l-(x+y))/2 to each summand). Used to control densities of unions. *)
lemma lem_ge_sum:
fixes l x y::real
assumes "l>x+y"
shows "∃lx ly. l = lx + ly ∧ lx > x ∧ ly > y"
proof -
define lx ly where "lx = x + (l-(x+y))/2" and "ly = y + (l-(x+y))/2"
have "l = lx + ly ∧ lx > x ∧ ly > y" unfolding lx_def ly_def using assms by auto
then show ?thesis by auto
qed
text ‹The asymptotic density of a union is bounded by the sum of the asymptotic densities.›
(* Subadditivity of the upper density under union. Strategy: for every l above the sum
   of the two densities, split l = lA + lB (lem_ge_sum), bound each set eventually, and
   conclude density ≤ l; then let l decrease to the sum. *)
lemma upper_asymptotic_density_union:
"upper_asymptotic_density (A ∪ B) ≤ upper_asymptotic_density A + upper_asymptotic_density B"
proof -
have "upper_asymptotic_density (A ∪ B) ≤ l" if H: "l > upper_asymptotic_density A + upper_asymptotic_density B" for l
proof -
obtain lA lB where l: "l = lA+lB" and lA: "lA > upper_asymptotic_density A" and lB: "lB > upper_asymptotic_density B"
using lem_ge_sum H by blast
{
fix n assume H: "card (A ∩ {..<n}) < lA * n ∧ card (B ∩ {..<n}) < lB * n"
have "card((A∪B) ∩ {..<n}) ≤ card(A ∩ {..<n}) + card(B ∩ {..<n})"
by (simp add: card_Un_le inf_sup_distrib2)
also have "... ≤ l * n" using l H by (simp add: ring_class.ring_distribs(2))
finally have "card ((A∪B) ∩ {..<n}) ≤ l * n" by simp
}
moreover have "eventually (λn. card (A ∩ {..<n}) < lA * n ∧ card (B ∩ {..<n}) < lB * n) sequentially"
using upper_asymptotic_densityD[OF lA] upper_asymptotic_densityD[OF lB] eventually_conj by blast
ultimately have "eventually (λn. card((A∪B) ∩ {..<n}) ≤ l * n) sequentially"
by (simp add: eventually_mono)
then show "upper_asymptotic_density (A ∪ B) ≤ l" using upper_asymptotic_densityI by auto
qed
(* Density ≤ l for every l above the sum forces density ≤ the sum. *)
then show ?thesis by (meson dense not_le)
qed
text ‹It follows that the asymptotic density is an increasing function for inclusion.›
(* Monotonicity of the upper density under inclusion: A ⊆ B gives density A ≤ density B,
   again by bounding against every l above the density of B. *)
lemma upper_asymptotic_density_subset:
assumes "A ⊆ B"
shows "upper_asymptotic_density A ≤ upper_asymptotic_density B"
proof -
have "upper_asymptotic_density A ≤ l" if l: "l > upper_asymptotic_density B" for l
proof -
have "card(A ∩ {..<n}) ≤ card(B ∩ {..<n})" for n
using assms by (metis Int_lower2 Int_mono card_mono finite_lessThan finite_subset inf.left_idem)
then have "card(A ∩ {..<n}) ≤ l * n" if "card(B ∩ {..<n}) < l * n" for n
using that by (meson lessThan_def less_imp_le of_nat_le_iff order_trans)
moreover have "eventually (λn. card(B ∩ {..<n}) < l * n) sequentially"
using upper_asymptotic_densityD l by simp
ultimately have "eventually (λn. card(A ∩ {..<n}) ≤ l * n) sequentially"
by (simp add: eventually_mono)
then show ?thesis using upper_asymptotic_densityI by auto
qed
then show ?thesis by (meson dense not_le)
qed
text ‹If a set has a density, then it is also its asymptotic density.›
(* If card(A ∩ [0,n))/n converges to l, the upper density is l (for a convergent
   sequence the limsup equals the limit). *)
lemma upper_asymptotic_density_lim:
assumes "(λn. card(A ∩ {..<n})/n) ⇢ l"
shows "upper_asymptotic_density A = l"
proof -
have "(λn. ereal(card(A ∩ {..<n})/n)) ⇢ l" using assms by auto
then have "limsup (λn. card(A ∩ {..<n})/n) = l"
using sequentially_bot tendsto_iff_Liminf_eq_Limsup by blast
then show ?thesis unfolding upper_asymptotic_density_def by auto
qed
text ‹If two sets are equal up to something small, i.e. a set with zero upper density,
then they have the same upper density.›
(* Removing a zero-density part does not change the upper density: if A ⊆ B and B - A
   has density 0, then A and B have the same density (two-sided squeeze via union
   subadditivity and monotonicity). *)
lemma upper_asymptotic_density_0_diff:
assumes "A ⊆ B" "upper_asymptotic_density (B-A) = 0"
shows "upper_asymptotic_density A = upper_asymptotic_density B"
proof -
have "upper_asymptotic_density B ≤ upper_asymptotic_density A + upper_asymptotic_density (B-A)"
using upper_asymptotic_density_union[of A "B-A"] by (simp add: assms(1) sup.absorb2)
then have "upper_asymptotic_density B ≤ upper_asymptotic_density A"
using assms(2) by simp
then show ?thesis using upper_asymptotic_density_subset[OF assms(1)] by simp
qed
(* Sets whose symmetric difference has zero density share the same upper density:
   both A - (A∩B) and B - (A∩B) sit inside A Δ B, hence have density 0, and
   upper_asymptotic_density_0_diff applies on each side. *)
lemma upper_asymptotic_density_0_Delta:
assumes "upper_asymptotic_density (A Δ B) = 0"
shows "upper_asymptotic_density A = upper_asymptotic_density B"
proof -
have "A- (A∩B) ⊆ A Δ B" "B- (A∩B) ⊆ A Δ B"
using assms(1) by (auto simp add: Diff_Int Un_infinite)
then have "upper_asymptotic_density (A - (A∩B)) = 0"
"upper_asymptotic_density (B - (A∩B)) = 0"
using upper_asymptotic_density_subset assms(1) upper_asymptotic_density_in_01(3)
by (metis inf.absorb_iff2 inf.orderE)+
then have "upper_asymptotic_density (A∩B) = upper_asymptotic_density A"
"upper_asymptotic_density (A∩B) = upper_asymptotic_density B"
using upper_asymptotic_density_0_diff by auto
then show ?thesis by simp
qed
text ‹Finite sets have vanishing upper asymptotic density.›
(* A finite set has upper density 0: card(A ∩ [0,n))/n is sandwiched between 0 and
   card A / n, which tends to 0. *)
lemma upper_asymptotic_density_finite:
assumes "finite A"
shows "upper_asymptotic_density A = 0"
proof -
have "(λn. card(A ∩ {..<n})/n) ⇢ 0"
proof (rule tendsto_sandwich[where ?f = "λn. 0" and ?h = "λ(n::nat). card A / n"])
have "card(A ∩ {..<n})/n ≤ card A / n" if "n>0" for n
using that ‹finite A› by (simp add: card_mono divide_right_mono)
then show "eventually (λn. card(A ∩ {..<n})/n ≤ card A / n) sequentially"
by (simp add: eventually_at_top_dense)
have "(λn. real (card A)* (1 / real n)) ⇢ real(card A) * 0"
by (intro tendsto_intros)
then show "(λn. real (card A) / real n) ⇢ 0" by auto
qed (auto)
then show "upper_asymptotic_density A = 0" using upper_asymptotic_density_lim by auto
qed
text ‹In particular, bounded intervals have zero upper density.›
(* All bounded intervals of naturals (in every flavor of open/closed endpoints) are
   finite, hence have upper density 0; registered as [simp] rules. *)
lemma upper_asymptotic_density_bdd_interval [simp]:
"upper_asymptotic_density {} = 0"
"upper_asymptotic_density {..N} = 0"
"upper_asymptotic_density {..<N} = 0"
"upper_asymptotic_density {n..N} = 0"
"upper_asymptotic_density {n..<N} = 0"
"upper_asymptotic_density {n<..N} = 0"
"upper_asymptotic_density {n<..<N} = 0"
by (auto intro!: upper_asymptotic_density_finite)
text ‹The density of a finite union is bounded by the sum of the densities.›
(* Finite subadditivity: the density of a finite union is at most the sum of the
   densities, by induction from the binary union lemma. *)
lemma upper_asymptotic_density_finite_Union:
assumes "finite I"
shows "upper_asymptotic_density (⋃i∈I. A i) ≤ (∑i∈I. upper_asymptotic_density (A i))"
using assms apply (induction I rule: finite_induct)
using order_trans[OF upper_asymptotic_density_union] by auto
text ‹It is sometimes useful to compute the asymptotic density by shifting a little bit the set:
this only makes a finite difference that vanishes when divided by $n$.›
(* Shift invariance: counting A over the shifted window {k..nat(n+l)} instead of {..<n}
   changes the cardinality by at most a constant C, and C/n → 0 kills the difference in
   the limsup. The two inequalities between the shifted and unshifted quotients are
   proved symmetrically, then combined into an equality of limsups. *)
lemma upper_asymptotic_density_shift:
fixes k::nat and l::int
shows "ereal(upper_asymptotic_density A) = limsup (λn. card(A ∩ {k..nat(n+l)}) / n)"
proof -
(* C bounds the number of points a window shift by k on the left and |l| on the right
   can add or remove. *)
define C where "C = k+2*nat(abs(l))+1"
have *: "(λn. C*(1/n)) ⇢ real C * 0"
by (intro tendsto_intros)
have l0: "limsup (λn. C/n) = 0"
apply (rule lim_imp_Limsup, simp) using * by (simp add: zero_ereal_def)
(* First direction: shifted count ≤ unshifted count + C. *)
have "card(A ∩ {k..nat(n+l)}) / n ≤ card (A ∩ {..<n})/n + C/n" for n
proof -
have "card(A ∩ {k..nat(n+l)}) ≤ card (A ∩ {..<n} ∪ {n..n + nat(abs(l))})"
by (rule card_mono, auto)
also have "... ≤ card (A ∩ {..<n}) + card {n..n + nat(abs(l))}"
by (rule card_Un_le)
also have "... ≤ card (A ∩ {..<n}) + real C"
unfolding C_def by auto
finally have "card(A ∩ {k..nat(n+l)}) / n ≤ (card (A ∩ {..<n}) + real C) /n"
by (simp add: divide_right_mono)
also have "... = card (A ∩ {..<n})/n + C/n"
using add_divide_distrib by auto
finally show ?thesis
by auto
qed
then have "limsup (λn. card(A ∩ {k..nat(n+l)}) / n) ≤ limsup (λn. card (A ∩ {..<n})/n + ereal(C/n))"
by (simp add: Limsup_mono)
also have "... ≤ limsup (λn. card (A ∩ {..<n})/n) + limsup (λn. C/n)"
by (rule ereal_limsup_add_mono)
finally have a: "limsup (λn. card(A ∩ {k..nat(n+l)}) / n) ≤ limsup (λn. card (A ∩ {..<n})/n)"
using l0 by simp
(* Second direction: unshifted count ≤ shifted count + C. *)
have "card (A ∩ {..<n}) / n ≤ card (A ∩ {k..nat(n+l)})/n + C/n" for n
proof -
have "card ({..<k} ∪ {n-nat(abs(l))..n + nat(abs(l))}) ≤ card {..<k} + card {n-nat(abs(l))..n + nat(abs(l))}"
by (rule card_Un_le)
also have "... ≤ k + 2*nat(abs(l)) + 1" by auto
finally have *: "card ({..<k} ∪ {n-nat(abs(l))..n + nat(abs(l))}) ≤ C" unfolding C_def by blast
have "card(A ∩ {..<n}) ≤ card (A ∩ {k..nat(n+l)} ∪ ({..<k} ∪ {n-nat(abs(l))..n + nat(abs(l))}))"
by (rule card_mono, auto)
also have "... ≤ card (A ∩ {k..nat(n+l)}) + card ({..<k} ∪ {n-nat(abs(l))..n + nat(abs(l))})"
by (rule card_Un_le)
also have "... ≤ card (A ∩ {k..nat(n+l)}) + C"
using * by auto
finally have "card (A ∩ {..<n}) / n ≤ (card (A ∩ {k..nat(n+l)}) + real C)/n"
by (simp add: divide_right_mono)
also have "... = card (A ∩ {k..nat(n+l)})/n + C/n"
using add_divide_distrib by auto
finally show ?thesis
by auto
qed
then have "limsup (λn. card(A ∩ {..<n}) / n) ≤ limsup (λn. card (A ∩ {k..nat(n+l)})/n + ereal(C/n))"
by (simp add: Limsup_mono)
also have "... ≤ limsup (λn. card (A ∩ {k..nat(n+l)})/n) + limsup (λn. C/n)"
by (rule ereal_limsup_add_mono)
finally have "limsup (λn. card(A ∩ {..<n}) / n) ≤ limsup (λn. card (A ∩ {k..nat(n+l)})/n)"
using l0 by simp
then have "limsup (λn. card(A ∩ {..<n}) / n) = limsup (λn. card (A ∩ {k..nat(n+l)})/n)"
using a by auto
then show ?thesis using upper_asymptotic_density_in_01(1) by auto
qed
text ‹Upper asymptotic density is measurable.›
(* Measurability of x ↦ upper density of {n. P n x}, provided each predicate P n is
   measurable; follows by unfolding the definition (limsup of measurable maps). *)
lemma upper_asymptotic_density_meas [measurable]:
assumes [measurable]: "⋀(n::nat). Measurable.pred M (P n)"
shows "(λx. upper_asymptotic_density {n. P n x}) ∈ borel_measurable M"
unfolding upper_asymptotic_density_def by auto
text ‹A finite union of sets with zero upper density still has zero upper density.›
(* The union of two zero-density sets has zero density (sandwich between 0 and 0+0). *)
lemma upper_asymptotic_density_zero_union:
assumes "upper_asymptotic_density A = 0" "upper_asymptotic_density B = 0"
shows "upper_asymptotic_density (A ∪ B) = 0"
using upper_asymptotic_density_in_01(3)[of "A ∪ B"] upper_asymptotic_density_union[of A B] unfolding assms by auto
(* A finite union of zero-density sets still has zero density, by induction on the
   index set using the binary case. *)
lemma upper_asymptotic_density_zero_finite_Union:
assumes "finite I" "⋀i. i ∈ I ⟹ upper_asymptotic_density (A i) = 0"
shows "upper_asymptotic_density (⋃i∈I. A i) = 0"
using assms by (induction rule: finite_induct, auto intro!: upper_asymptotic_density_zero_union)
text ‹The union of sets with small asymptotic densities can have a large density: think
of $A_n = [0,n]$: each $A_n$ has density $0$, but the union of the $A_n$ has density $1$. However, if one
only wants a set which contains each $A_n$ eventually, then one can obtain a ``union'' that has
essentially the same density as each $A_n$. This is often used as a replacement for the diagonal
argument in density arguments: if for each $n$ one can find a set $A_n$ with good properties and
a controlled density, then their ``union'' will have the same properties (eventually) and a
controlled density.›
(* Diagonal-type construction: for an increasing family A with each density ≤ l,
   there is a single set B of density ≤ l that eventually contains every A n.
   The set B is assembled as the union of the tails A k ∩ {N k..}, where N k is a
   rapidly growing "waiting time" after which A k has density below l + (1/2)^k. *)
proposition upper_asymptotic_density_incseq_Union:
assumes "⋀(n::nat). upper_asymptotic_density (A n) ≤ l" "incseq A"
shows "∃B. upper_asymptotic_density B ≤ l ∧ (∀n. ∃N. A n ∩ {N..} ⊆ B)"
proof -
(* Each A k is eventually below the slightly enlarged slope l + (1/2)^k. *)
have A: "∃N. ∀j ≥ N. card (A k ∩ {..<j}) < (l + (1/2)^k) * j" for k
proof -
have *: "upper_asymptotic_density (A k) < l + (1/2)^k" using assms(1)[of k]
by (metis add.right_neutral add_mono_thms_linordered_field(4) less_divide_eq_numeral1(1) mult_zero_left zero_less_one zero_less_power)
show ?thesis
using upper_asymptotic_densityD[OF *] unfolding eventually_sequentially by auto
qed
(* Choose the waiting times N k strictly increasing, via dependent choice. *)
have "∃N. ∀k. (∀j ≥ N k. card (A k ∩ {..<j}) ≤ (l+(1/2)^k) * j) ∧ N (Suc k) > N k"
proof (rule dependent_nat_choice)
fix x k::nat
obtain N where N: "∀j≥N. real (card (A (Suc k) ∩ {..<j})) ≤ (l + (1 / 2) ^ Suc k) * real j"
using A[of "Suc k"] less_imp_le by auto
show "∃y. (∀j≥y. real (card (A(Suc k) ∩ {..<j})) ≤ (l + (1 / 2) ^ Suc k) * real j) ∧ x < y"
apply (rule exI[of _ "max x N + 1"]) using N by auto
next
show "∃x. ∀j≥x. real (card ((A 0) ∩ {..<j})) ≤ (l + (1 / 2) ^ 0) * real j"
using A[of 0] less_imp_le by auto
qed
text ‹Here is the choice of the good waiting function $N$›
then obtain N where N: "⋀k j. j ≥ N k ⟹ card (A k ∩ {..<j}) ≤ (l + (1/2)^k) * j" "⋀k. N (Suc k) > N k"
by blast
then have "strict_mono N" by (simp add: strict_monoI_Suc)
have Nmono: "N k < N l" if "k < l" for k l
using N(2) by (simp add: lift_Suc_mono_less that)
text ‹We can now define the global bad set $B$.›
define B where "B = (⋃k. A k ∩ {N k..})"
text ‹We will now show that it also has density at most $l$.›
(* On the window [0,n) with N k ≤ n < N (Suc k), B coincides with part of A k
   (later tails have not started yet, earlier sets are contained in A k by incseq),
   so the count for B is controlled by the bound for A k. *)
have Bcard: "card (B ∩ {..<n}) ≤ (l+(1/2)^k) * n" if "N k ≤ n" "n < N (Suc k)" for n k
proof -
have "{N j..<n} = {}" if "j ∈ {k<..}" for j
using ‹n < N (Suc k)› that by (auto, meson ‹strict_mono N› less_trans not_less_eq strict_mono_less)
then have *: "(⋃j∈{k<..}. A j ∩ {N j..<n}) = {}" by force
have "B ∩ {..<n} = (⋃j. A j ∩ {N j..<n})"
unfolding B_def by auto
also have "... = (⋃j ∈ {..k}. A j ∩ {N j..<n}) ∪ (⋃j∈{k<..}. A j ∩ {N j..<n})"
unfolding UN_Un [symmetric] by (rule arg_cong [of _ _ Union]) auto
also have "... = (⋃j ∈ {..k}. A j ∩ {N j..<n})"
unfolding * by simp
also have "... ⊆ (⋃j ∈ {..k}. A k ∩ {..<n})"
using ‹incseq A› unfolding incseq_def by (auto intro!: UN_mono)
also have "... = A k ∩ {..<n}"
by simp
finally have "card (B ∩ {..<n}) ≤ card (A k ∩ {..<n})"
by (rule card_mono[rotated], auto)
then show ?thesis
using N(1)[OF ‹n ≥ N k›] by simp
qed
(* For any a > l, pick k with l + (1/2)^k < a; for n large, locate n in the window
   [N m, N (Suc m)) via m = Max {p. N p ≤ n} and apply Bcard. *)
have "eventually (λn. card (B ∩ {..<n}) ≤ a * n) sequentially" if "l < a" for a::real
proof -
have "eventually (λk. (l+(1/2)^k) < a) sequentially"
apply (rule order_tendstoD[of _ "l+0"], intro tendsto_intros) using that by auto
then obtain k where "l + (1/2)^k < a"
unfolding eventually_sequentially by auto
have "card (B ∩ {..<n}) ≤ a * n" if "n ≥ N k + 1"for n
proof -
have "n ≥ N k" "n ≥ 1" using that by auto
have "{p. n ≥ N p} ⊆ {..n}"
using ‹strict_mono N› dual_order.trans seq_suble by blast
then have *: "finite {p. n ≥ N p}" "{p. n ≥ N p} ≠ {}"
using ‹n ≥ N k› finite_subset by auto
define m where "m = Max {p. n ≥ N p}"
have "k ≤ m"
unfolding m_def using Max_ge[OF *(1), of k] that by auto
have "N m ≤ n"
unfolding m_def using Max_in[OF *] by auto
have "Suc m ∉ {p. n ≥ N p}"
unfolding m_def using * Max_ge Suc_n_not_le_n by blast
then have "n < N (Suc m)" by simp
have "card (B ∩ {..<n}) ≤ (l+(1/2)^m) * n"
using Bcard[OF ‹N m ≤ n› ‹n < N (Suc m)›] by simp
also have "... ≤ (l + (1/2)^k) * n"
apply (rule mult_right_mono) using ‹k ≤ m› by (auto simp add: power_decreasing)
also have "... ≤ a * n"
using ‹l + (1/2)^k < a› ‹n ≥ 1› by auto
finally show ?thesis by auto
qed
then show ?thesis unfolding eventually_sequentially by auto
qed
then have "upper_asymptotic_density B ≤ a" if "a > l" for a
using upper_asymptotic_densityI that by auto
then have "upper_asymptotic_density B ≤ l"
by (meson dense not_le)
(* By construction B contains the tail A n ∩ {N n..} of every A n. *)
moreover have "∃N. A n ∩ {N..} ⊆ B" for n
apply (rule exI[of _ "N n"]) unfolding B_def by auto
ultimately show ?thesis by auto
qed
text ‹When the sequence of sets is not increasing, one can only obtain a set whose density
is bounded by the sum of the densities.›
(* Without monotonicity of the family, one still gets a set B with density at most the
   SUM of all densities (assumed summable) that eventually contains every A n, by
   applying the increasing-union result to the partial unions C n = A 0 ∪ … ∪ A n. *)
proposition upper_asymptotic_density_Union:
assumes "summable (λn. upper_asymptotic_density (A n))"
shows "∃B. upper_asymptotic_density B ≤ (∑n. upper_asymptotic_density (A n)) ∧ (∀n. ∃N. A n ∩ {N..} ⊆ B)"
proof -
define C where "C = (λn. (⋃i≤n. A i))"
have C1: "incseq C"
unfolding C_def incseq_def by fastforce
have C2: "upper_asymptotic_density (C k) ≤ (∑n. upper_asymptotic_density (A n))" for k
proof -
have "upper_asymptotic_density (C k) ≤ (∑i≤k. upper_asymptotic_density (A i))"
unfolding C_def by (rule upper_asymptotic_density_finite_Union, auto)
also have "... ≤ (∑i. upper_asymptotic_density (A i))"
apply (rule sum_le_suminf[OF assms]) using upper_asymptotic_density_in_01 by auto
finally show ?thesis by simp
qed
obtain B where B: "upper_asymptotic_density B ≤ (∑n. upper_asymptotic_density (A n))"
"⋀n. ∃N. C n ∩ {N..} ⊆ B"
using upper_asymptotic_density_incseq_Union[OF C2 C1] by blast
have "∃N. A n ∩ {N..} ⊆ B" for n
using B(2)[of n] unfolding C_def by auto
then show ?thesis using B(1) by blast
qed
text ‹A particular case of the previous proposition, often useful, is when all sets have density
zero.›
(* Specialization to zero densities: a countable family of zero-density sets admits a
   zero-density "eventual union" B containing each A n from some point on. *)
proposition upper_asymptotic_density_zero_Union:
assumes "⋀n::nat. upper_asymptotic_density (A n) = 0"
shows "∃B. upper_asymptotic_density B = 0 ∧ (∀n. ∃N. A n ∩ {N..} ⊆ B)"
proof -
have "∃B. upper_asymptotic_density B ≤ (∑n. upper_asymptotic_density (A n)) ∧ (∀n. ∃N. A n ∩ {N..} ⊆ B)"
apply (rule upper_asymptotic_density_Union) unfolding assms by auto
then obtain B where "upper_asymptotic_density B ≤ 0" "⋀n. ∃N. A n ∩ {N..} ⊆ B"
unfolding assms by auto
then show ?thesis
using upper_asymptotic_density_in_01(3)[of B] by auto
qed
subsection ‹Lower asymptotic densities›
text ‹The lower asymptotic density of a set of natural numbers is defined just as its
upper asymptotic density but using a liminf instead of a limsup. Its properties are proved
exactly in the same way.›
(* Lower asymptotic density: same as the upper density but with a liminf, measuring
   how big the set is at ALL large times rather than at some. *)
definition lower_asymptotic_density::"nat set ⇒ real"
where "lower_asymptotic_density A = real_of_ereal(liminf (λn. card(A ∩ {..<n})/n))"
(* Analogue of upper_asymptotic_density_in_01 for the liminf: the defining liminf is
   finite and in [0,1], so the real projection is faithful and lies in [0,1]. *)
lemma lower_asymptotic_density_in_01:
"ereal(lower_asymptotic_density A) = liminf (λn. card(A ∩ {..<n})/n)"
"lower_asymptotic_density A ≤ 1"
"lower_asymptotic_density A ≥ 0"
proof -
{
fix n::nat assume "n>0"
have "card(A ∩ {..<n}) ≤ n" by (metis card_lessThan Int_lower2 card_mono finite_lessThan)
then have "card(A ∩ {..<n}) / n ≤ ereal 1" using ‹n>0› by auto
}
then have "eventually (λn. card(A ∩ {..<n}) / n ≤ ereal 1) sequentially"
by (simp add: eventually_at_top_dense)
then have "limsup (λn. card(A ∩ {..<n})/n) ≤ 1" by (simp add: Limsup_const Limsup_bounded)
(* The upper bound on the liminf comes through the limsup (liminf ≤ limsup). *)
then have a: "liminf (λn. card(A ∩ {..<n})/n) ≤ 1"
by (meson Liminf_le_Limsup less_le_trans not_le sequentially_bot)
have "card(A ∩ {..<n}) / n ≥ ereal 0" for n by auto
then have b: "liminf (λn. card(A ∩ {..<n})/n) ≥ 0" by (simp add: le_Liminf_iff less_le_trans)
have "abs(liminf (λn. card(A ∩ {..<n})/n)) ≠ ∞" using a b by auto
then show "ereal(lower_asymptotic_density A) = liminf (λn. card(A ∩ {..<n})/n)"
unfolding lower_asymptotic_density_def by auto
show "lower_asymptotic_density A ≤ 1" "lower_asymptotic_density A ≥ 0" unfolding lower_asymptotic_density_def
using a b by (auto simp add: real_of_ereal_le_1 real_of_ereal_pos)
qed
text ‹The lower asymptotic density is bounded by the upper one. When they coincide,
$Card(A \cap [0,n))/n$ converges to this common value.›
(* The lower density never exceeds the upper density (liminf ≤ limsup). *)
lemma lower_asymptotic_density_le_upper:
"lower_asymptotic_density A ≤ upper_asymptotic_density A"
using lower_asymptotic_density_in_01(1) upper_asymptotic_density_in_01(1)
by (metis (mono_tags, lifting) Liminf_le_Limsup ereal_less_eq(3) sequentially_bot)
(* When lower and upper densities agree on l, the quotient sequence converges to l
   (limsup ≤ l ≤ liminf forces convergence). *)
lemma lower_asymptotic_density_eq_upper:
assumes "lower_asymptotic_density A = l" "upper_asymptotic_density A = l"
shows "(λn. card(A ∩ {..<n})/n) ⇢ l"
apply (rule limsup_le_liminf_real)
using upper_asymptotic_density_in_01(1)[of A] lower_asymptotic_density_in_01(1)[of A] assms by auto
text ‹In particular, when a set has a zero upper density, or a lower density one, then this
implies the corresponding convergence of $Card(A \cap [0,n))/n$.›
(* Zero upper density implies actual convergence of the quotients to 0, since the
   lower density is squeezed between 0 and the upper density. *)
lemma upper_asymptotic_density_zero_lim:
assumes "upper_asymptotic_density A = 0"
shows "(λn. card(A ∩ {..<n})/n) ⇢ 0"
apply (rule lower_asymptotic_density_eq_upper)
using assms lower_asymptotic_density_le_upper[of A] lower_asymptotic_density_in_01(3)[of A] by auto
(* Dually, lower density 1 implies convergence of the quotients to 1, since the upper
   density is squeezed between the lower density and 1. *)
lemma lower_asymptotic_density_one_lim:
assumes "lower_asymptotic_density A = 1"
shows "(λn. card(A ∩ {..<n})/n) ⇢ 1"
apply (rule lower_asymptotic_density_eq_upper)
using assms lower_asymptotic_density_le_upper[of A] upper_asymptotic_density_in_01(2)[of A] by auto
text ‹The lower asymptotic density of a set is $1$ minus the upper asymptotic density of its complement.
Hence, most statements about one of them follow from statements about the other one,
although we will instead give direct proofs, as they are no more complicated.›
(* Duality: the lower density of A is 1 minus the upper density of its complement.
   Key computation: card(A ∩ [0,n)) = n - card((UNIV-A) ∩ [0,n)), so the quotients
   are complementary, and liminf (1 - s) = 1 - limsup s. *)
lemma lower_upper_asymptotic_density_complement:
"lower_asymptotic_density A = 1 - upper_asymptotic_density (UNIV - A)"
proof -
{
fix n assume "n>(0::nat)"
have "{..<n} ∩ UNIV - (UNIV - ({..<n} - (UNIV - A))) = {..<n} ∩ A"
by blast
moreover have "{..<n} ∩ UNIV ∩ (UNIV - ({..<n} - (UNIV - A))) = (UNIV - A) ∩ {..<n}"
by blast
ultimately have "card (A ∩ {..<n}) = n - card((UNIV-A) ∩ {..<n})"
by (metis (no_types) Int_commute card_Diff_subset_Int card_lessThan finite_Int finite_lessThan inf_top_right)
then have "card (A ∩ {..<n})/n = (real n - card((UNIV-A) ∩ {..<n})) / n"
by (metis Int_lower2 card_lessThan card_mono finite_lessThan of_nat_diff)
then have "card (A ∩ {..<n})/n = ereal 1 - card((UNIV-A) ∩ {..<n})/n"
using ‹n>0› by (simp add: diff_divide_distrib)
}
then have "eventually (λn. card (A ∩ {..<n})/n = ereal 1 - card((UNIV-A) ∩ {..<n})/n) sequentially"
by (simp add: eventually_at_top_dense)
then have "liminf (λn. card (A ∩ {..<n})/n) = liminf (λn. ereal 1 - card((UNIV-A) ∩ {..<n})/n)"
by (rule Liminf_eq)
also have "... = ereal 1 - limsup (λn. card((UNIV-A) ∩ {..<n})/n)"
by (rule liminf_ereal_cminus, simp)
finally show ?thesis unfolding lower_asymptotic_density_def
by (metis ereal_minus(1) real_of_ereal.simps(1) upper_asymptotic_density_in_01(1))
qed
(* Elimination rule for the lower density: if it is > l then card(A ∩ [0,n)) > l*n
   for all large n (mirror of upper_asymptotic_densityD). *)
proposition lower_asymptotic_densityD:
fixes l::real
assumes "lower_asymptotic_density A > l"
shows "eventually (λn. card(A ∩ {..<n}) > l * n) sequentially"
proof -
have "ereal(lower_asymptotic_density A) > l" using assms by auto
then have "liminf (λn. card(A ∩ {..<n})/n) > l"
using lower_asymptotic_density_in_01(1) by auto
then have "eventually (λn. card(A ∩ {..<n})/n > ereal l) sequentially"
using less_LiminfD by blast
then have "eventually (λn. card(A ∩ {..<n})/n > ereal l ∧ n > 0) sequentially"
using eventually_gt_at_top eventually_conj by blast
moreover have "card(A ∩ {..<n}) > l * n" if "card(A ∩ {..<n})/n > ereal l ∧ n > 0" for n
using that divide_le_eq ereal_less_eq(3) less_imp_of_nat_less not_less of_nat_eq_0_iff by fastforce
ultimately show "eventually (λn. card(A ∩ {..<n}) > l * n) sequentially"
by (simp add: eventually_mono)
qed
proposition lower_asymptotic_densityI:
fixes l::real
assumes "eventually (λn. card(A ∩ {..<n}) ≥ l * n) sequentially"
shows "lower_asymptotic_density A ≥ l"
proof -
have "eventually (λn. card(A ∩ {..<n}) ≥ l * n ∧ n > 0) sequentially"
using assms eventually_gt_at_top eventually_conj by blast
moreover have "card(A ∩ {..<n})/n ≥ ereal l" if "card(A ∩ {..<n}) ≥ l * n ∧ n > 0" for n
using that by (meson ereal_less_eq(3) not_less of_nat_0_less_iff pos_divide_less_eq)
ultimately have "eventually (λn. card(A ∩ {..<n})/n ≥ ereal l) sequentially"
by (simp add: eventually_mono)
then have "liminf (λn. card(A ∩ {..<n})/n) ≥ ereal l"
by (simp add: Liminf_bounded)
then have "ereal(lower_asymptotic_density A) ≥ ereal l"
using lower_asymptotic_density_in_01(1) by auto
then show ?thesis by auto
qed
text ‹One can control the asymptotic density of an intersection in terms of the asymptotic density
of each component›
lemma lower_asymptotic_density_intersection:
"lower_asymptotic_density A + lower_asymptotic_density B ≤ lower_asymptotic_density (A ∩ B) + 1"
using upper_asymptotic_density_union[of "UNIV - A" "UNIV - B"]
unfolding lower_upper_asymptotic_density_complement by (auto simp add: algebra_simps Diff_Int)
lemma lower_asymptotic_density_subset:
assumes "A ⊆ B"
shows "lower_asymptotic_density A ≤ lower_asymptotic_density B"
using upper_asymptotic_density_subset[of "UNIV-B" "UNIV-A"] assms
unfolding lower_upper_asymptotic_density_complement by auto
lemma lower_asymptotic_density_lim:
assumes "(λn. card(A ∩ {..<n})/n) ⇢ l"
shows "lower_asymptotic_density A = l"
proof -
have "(λn. ereal(card(A ∩ {..<n})/n)) ⇢ l" using assms by auto
then have "liminf (λn. card(A ∩ {..<n})/n) = l"
using sequentially_bot tendsto_iff_Liminf_eq_Limsup by blast
then show ?thesis unfolding lower_asymptotic_density_def by auto
qed
lemma lower_asymptotic_density_finite:
assumes "finite A"
shows "lower_asymptotic_density A = 0"
using lower_asymptotic_density_in_01(3) upper_asymptotic_density_finite[OF assms] lower_asymptotic_density_le_upper
by (metis antisym_conv)
text ‹In particular, bounded intervals have zero lower density.›
lemma lower_asymptotic_density_bdd_interval [simp]:
"lower_asymptotic_density {} = 0"
"lower_asymptotic_density {..N} = 0"
"lower_asymptotic_density {..<N} = 0"
"lower_asymptotic_density {n..N} = 0"
"lower_asymptotic_density {n..<N} = 0"
"lower_asymptotic_density {n<..N} = 0"
"lower_asymptotic_density {n<..<N} = 0"
by (auto intro!: lower_asymptotic_density_finite)
text ‹Conversely, unbounded intervals have density $1$.›
lemma lower_asymptotic_density_infinite_interval [simp]:
"lower_asymptotic_density {N..} = 1"
"lower_asymptotic_density {N<..} = 1"
"lower_asymptotic_density UNIV = 1"
proof -
have "UNIV - {N..} = {..<N}" by auto
then show "lower_asymptotic_density {N..} = 1"
by (auto simp add: lower_upper_asymptotic_density_complement)
have "UNIV - {N<..} = {..N}" by auto
then show "lower_asymptotic_density {N<..} = 1"
by (auto simp add: lower_upper_asymptotic_density_complement)
show "lower_asymptotic_density UNIV = 1"
by (auto simp add: lower_upper_asymptotic_density_complement)
qed
lemma upper_asymptotic_density_infinite_interval [simp]:
"upper_asymptotic_density {N..} = 1"
"upper_asymptotic_density {N<..} = 1"
"upper_asymptotic_density UNIV = 1"
by (metis antisym upper_asymptotic_density_in_01(2) lower_asymptotic_density_infinite_interval lower_asymptotic_density_le_upper)+
text ‹The intersection of sets with lower density one still has lower density one.›
lemma lower_asymptotic_density_one_intersection:
assumes "lower_asymptotic_density A = 1" "lower_asymptotic_density B = 1"
shows "lower_asymptotic_density (A ∩ B) = 1"
using lower_asymptotic_density_in_01(2)[of "A ∩ B"] lower_asymptotic_density_intersection[of A B] unfolding assms by auto
lemma lower_asymptotic_density_one_finite_Intersection:
assumes "finite I" "⋀i. i ∈ I ⟹ lower_asymptotic_density (A i) = 1"
shows "lower_asymptotic_density (⋂i∈I. A i) = 1"
using assms by (induction rule: finite_induct, auto intro!: lower_asymptotic_density_one_intersection)
text ‹As for the upper asymptotic density, there is a modification of the intersection, akin
to the diagonal argument in this context, for which the ``intersection'' of sets with large
lower density still has large lower density.›
proposition lower_asymptotic_density_decseq_Inter:
assumes "⋀(n::nat). lower_asymptotic_density (A n) ≥ l" "decseq A"
shows "∃B. lower_asymptotic_density B ≥ l ∧ (∀n. ∃N. B ∩ {N..} ⊆ A n)"
proof -
define C where "C = (λn. UNIV - A n)"
have *: "upper_asymptotic_density (C n) ≤ 1 - l" for n
using assms(1)[of n] unfolding C_def lower_upper_asymptotic_density_complement[of "A n"] by auto
have **: "incseq C"
using assms(2) unfolding C_def incseq_def decseq_def by auto
obtain D where D: "upper_asymptotic_density D ≤ 1 - l" "⋀n. ∃N. C n ∩ {N..} ⊆ D"
using upper_asymptotic_density_incseq_Union[OF * **] by blast
define B where "B = UNIV - D"
have "lower_asymptotic_density B ≥ l"
using D(1) lower_upper_asymptotic_density_complement[of B] unfolding B_def by auto
moreover have "∃N. B ∩ {N..} ⊆ A n" for n
using D(2)[of n] unfolding B_def C_def by auto
ultimately show ?thesis by auto
qed
text ‹In the same way, the modified intersection of sets of density $1$ still has density one,
and is eventually contained in each of them.›
proposition lower_asymptotic_density_one_Inter:
assumes "⋀n::nat. lower_asymptotic_density (A n) = 1"
shows "∃B. lower_asymptotic_density B = 1 ∧ (∀n. ∃N. B ∩ {N..} ⊆ A n)"
proof -
define C where "C = (λn. UNIV - A n)"
have *: "upper_asymptotic_density (C n) = 0" for n
using assms(1)[of n] unfolding C_def lower_upper_asymptotic_density_complement[of "A n"] by auto
obtain D where D: "upper_asymptotic_density D = 0" "⋀n. ∃N. C n ∩ {N..} ⊆ D"
using upper_asymptotic_density_zero_Union[OF *] by force
define B where "B = UNIV - D"
have "lower_asymptotic_density B = 1"
using D(1) lower_upper_asymptotic_density_complement[of B] unfolding B_def by auto
moreover have "∃N. B ∩ {N..} ⊆ A n" for n
using D(2)[of n] unfolding B_def C_def by auto
ultimately show ?thesis by auto
qed
text ‹Sets with density $1$ play an important role in relation to Cesaro convergence of nonnegative
bounded sequences: such a sequence converges to $0$ in Cesaro average if and only if it converges
to $0$ along a set of density $1$.
The proof is not hard. Since the Cesaro average tends to $0$, then given $\epsilon>0$ the
proportion of times where $u_n < \epsilon$ tends to $1$, i.e., the set $A_\epsilon$ of such good
times has density $1$. A modified intersection (as constructed in
Proposition~\verb+lower_asymptotic_density_one_Inter+) of these times has density $1$, and
$u_n$ tends to $0$ along this set.
›
theorem cesaro_imp_density_one:
assumes "⋀n. u n ≥ (0::real)" "(λn. (∑i<n. u i)/n) ⇢ 0"
shows "∃A. lower_asymptotic_density A = 1 ∧ (λn. u n * indicator A n) ⇢ 0"
proof -
define B where "B = (λe. {n. u n ≥ e})"
text ‹$B e$ is the set of bad times where $u_n \geq e$. It has density $0$ thanks to
the assumption of Cesaro convergence to $0$.›
have A: "upper_asymptotic_density (B e) = 0" if "e > 0" for e
proof -
have *: "card (B e ∩ {..<n}) / n ≤ (1/e) * ((∑i∈{..<n}. u i)/n)" if "n ≥ 1" for n
proof -
have "e * card (B e ∩ {..<n}) = (∑i∈B e ∩ {..<n}. e)" by auto
also have "... ≤ (∑i∈B e ∩ {..<n}. u i)"
apply (rule sum_mono) unfolding B_def by auto
also have "... ≤ (∑i∈{..<n}. u i)"
apply (rule sum_mono2) using assms by auto
finally show ?thesis
using ‹e > 0› ‹n ≥ 1› by (auto simp add: divide_simps algebra_simps)
qed
have "(λn. card (B e ∩ {..<n}) / n) ⇢ 0"
proof (rule tendsto_sandwich[of "λ_. 0" _ _ "λn. (1/e) * ((∑i∈{..<n}. u i)/n)"])
have "(λn. (1/e) * ((∑i∈{..<n}. u i)/n)) ⇢ (1/e) * 0"
by (intro tendsto_intros assms)
then show "(λn. (1/e) * ((∑i∈{..<n}. u i)/n)) ⇢ 0" by simp
show "∀⇩F n in sequentially. real (card (B e ∩ {..<n})) / real n ≤ 1 / e * (sum u {..<n} / real n)"
using * unfolding eventually_sequentially by auto
qed (auto)
then show ?thesis
by (rule upper_asymptotic_density_lim)
qed
define C where "C = (λn::nat. UNIV - B (((1::real)/2)^n))"
have "lower_asymptotic_density (C n) = 1" for n
unfolding C_def lower_upper_asymptotic_density_complement by (auto intro!: A)
then obtain A where A: "lower_asymptotic_density A = 1" "⋀n. ∃N. A ∩ {N..} ⊆ C n"
using lower_asymptotic_density_one_Inter by blast
have E: "eventually (λn. u n * indicator A n < e) sequentially" if "e > 0" for e
proof -
have "eventually (λn. ((1::real)/2)^n < e) sequentially"
by (rule order_tendstoD[OF _ ‹e > 0›], intro tendsto_intros, auto)
then obtain n where n: "((1::real)/2)^n < e"
unfolding eventually_sequentially by auto
obtain N where N: "A ∩ {N..} ⊆ C n"
using A(2) by blast
have "u k * indicator A k < e" if "k ≥ N" for k
proof (cases "k ∈ A")
case True
then have "k ∈ C n" using N that by auto
then have "u k < ((1::real)/2)^n"
unfolding C_def B_def by auto
then have "u k < e"
using n by auto
then show ?thesis
unfolding indicator_def using True by auto
next
case False
then show ?thesis
unfolding indicator_def using ‹e > 0› by auto
qed
then show ?thesis
unfolding eventually_sequentially by auto
qed
have "(λn. u n * indicator A n) ⇢ 0"
apply (rule order_tendstoI[OF _ E])
unfolding indicator_def using ‹⋀n. u n ≥ 0› by (simp add: less_le_trans)
then show ?thesis
using ‹lower_asymptotic_density A = 1› by auto
qed
text ‹The proof of the reverse implication is more direct: in the Cesaro sum, just bound the
elements in $A$ by a small $\epsilon$, and the other ones by a uniform bound, to get a bound
which is $o(n)$.›
theorem density_one_imp_cesaro:
assumes "⋀n. u n ≥ (0::real)" "⋀n. u n ≤ C"
"lower_asymptotic_density A = 1"
"(λn. u n * indicator A n) ⇢ 0"
shows "(λn. (∑i<n. u i)/n) ⇢ 0"
proof (rule order_tendstoI)
fix e::real assume "e < 0"
have "(∑i<n. u i)/n ≥ 0" for n
using assms(1) by (simp add: sum_nonneg divide_simps)
then have "(∑i<n. u i)/n > e" for n
using ‹e < 0› less_le_trans by auto
then show "eventually (λn. (∑i<n. u i)/n > e) sequentially"
unfolding eventually_sequentially by auto
next
fix e::real assume "e > 0"
have "C ≥ 0" using ‹u 0 ≥ 0› ‹u 0 ≤ C› by auto
have "eventually (λn. u n * indicator A n < e/4) sequentially"
using order_tendstoD(2)[OF assms(4), of "e/4"] ‹e>0› by auto
then obtain N where N: "⋀k. k ≥ N ⟹ u k * indicator A k < e/4"
unfolding eventually_sequentially by auto
define B where "B = UNIV - A"
have *: "upper_asymptotic_density B = 0"
using assms unfolding B_def lower_upper_asymptotic_density_complement by auto
have "eventually (λn. card(B ∩ {..<n}) < (e/(4 * (C+1))) * n) sequentially"
apply (rule upper_asymptotic_densityD) using ‹e > 0› ‹C ≥ 0› * by auto
then obtain M where M: "⋀n. n ≥ M ⟹ card(B ∩ {..<n}) < (e/(4 * (C+1))) * n"
unfolding eventually_sequentially by auto
obtain P::nat where P: "P ≥ 4 * N * C/e"
using real_arch_simple by auto
define Q where "Q = N + M + 1 + P"
have "(∑i<n. u i)/n < e" if "n ≥ Q" for n
proof -
have n: "n ≥ N" "n ≥ M" "n ≥ P" "n ≥ 1"
using ‹n ≥ Q› unfolding Q_def by auto
then have n2: "n ≥ 4 * N * C/e" using P by auto
have "(∑i<n. u i) ≤ (∑i∈{..<N} ∪ ({N..<n} ∩ A) ∪ ({N..<n} - A). u i)"
by (rule sum_mono2, auto simp add: assms)
also have "... = (∑i∈{..<N}. u i) + (∑i∈{N..<n} ∩ A. u i) + (∑i∈{N..<n} - A. u i)"
by (subst sum.union_disjoint, auto)+
also have "... = (∑i∈{..<N}. u i) + (∑i∈{N..<n} ∩ A. u i * indicator A i) + (∑i∈{N..<n} - A. u i)"
unfolding indicator_def by auto
also have "... ≤ (∑i∈{..<N}. u i) + (∑i∈{N..<n}. u i * indicator A i) + (∑i∈ B ∩ {..<n}. u i)"
apply (intro add_mono sum_mono2) unfolding B_def using assms by auto
also have "... ≤ (∑i∈{..<N}. C) + (∑i∈{N..<n}. e/4) + (∑i∈B ∩ {..<n}. C)"
apply (intro add_mono sum_mono) using assms less_imp_le[OF N] by auto
also have "... = N * C + (n-N) * e/4 + card(B ∩ {..<n}) * C"
by auto
also have "... ≤ n * e/4 + n * e/4 + (e/(4 * (C+1))) * n * C"
apply (intro add_mono)
using n2 ‹e > 0› mult_right_mono[OF less_imp_le[OF M[OF ‹n ≥ M›]] ‹C ≥ 0›] by (auto simp add: divide_simps)
also have "... ≤ n * e * 3/4"
using ‹C ≥ 0› ‹e > 0› by (simp add: divide_simps algebra_simps)
also have "... < n * e"
using ‹n ≥ 1› ‹e > 0› by auto
finally show ?thesis
using ‹n ≥ 1› by (simp add: divide_simps algebra_simps)
qed
then show "eventually (λn. (∑i<n. u i)/n < e) sequentially"
unfolding eventually_sequentially by auto
qed
end
Theory Measure_Preserving_Transformations
section ‹Measure preserving or quasi-preserving maps›
theory Measure_Preserving_Transformations
imports SG_Library_Complement
begin
text ‹Ergodic theory in general is the study of the properties of measure preserving or
quasi-preserving dynamical systems. In this section, we introduce the basic definitions
in this respect.›
subsection ‹The different classes of transformations›
definition quasi_measure_preserving::"'a measure ⇒ 'b measure ⇒ ('a ⇒ 'b) set"
where "quasi_measure_preserving M N
= {f ∈ measurable M N. ∀ A ∈ sets N. (f -` A ∩ space M ∈ null_sets M) = (A ∈ null_sets N)}"
lemma quasi_measure_preservingI [intro]:
assumes "f ∈ measurable M N"
"⋀A. A ∈ sets N ⟹ (f -` A ∩ space M ∈ null_sets M) = (A ∈ null_sets N)"
shows "f ∈ quasi_measure_preserving M N"
using assms unfolding quasi_measure_preserving_def by auto
lemma quasi_measure_preservingE:
assumes "f ∈ quasi_measure_preserving M N"
shows "f ∈ measurable M N"
"⋀A. A ∈ sets N ⟹ (f -` A ∩ space M ∈ null_sets M) = (A ∈ null_sets N)"
using assms unfolding quasi_measure_preserving_def by auto
lemma id_quasi_measure_preserving:
"(λx. x) ∈ quasi_measure_preserving M M"
unfolding quasi_measure_preserving_def by auto
lemma quasi_measure_preserving_composition:
assumes "f ∈ quasi_measure_preserving M N"
"g ∈ quasi_measure_preserving N P"
shows "(λx. g(f x)) ∈ quasi_measure_preserving M P"
proof (rule quasi_measure_preservingI)
have f_meas [measurable]: "f ∈ measurable M N" by (rule quasi_measure_preservingE(1)[OF assms(1)])
have g_meas [measurable]: "g ∈ measurable N P" by (rule quasi_measure_preservingE(1)[OF assms(2)])
then show [measurable]: "(λx. g (f x)) ∈ measurable M P" by auto
fix C assume [measurable]: "C ∈ sets P"
define B where "B = g-`C ∩ space N"
have [measurable]: "B ∈ sets N" unfolding B_def by simp
have *: "B ∈ null_sets N ⟷ C ∈ null_sets P"
unfolding B_def using quasi_measure_preservingE(2)[OF assms(2)] by simp
define A where "A = f-`B ∩ space M"
have [measurable]: "A ∈ sets M" unfolding A_def by simp
have "A ∈ null_sets M ⟷ B ∈ null_sets N"
unfolding A_def using quasi_measure_preservingE(2)[OF assms(1)] by simp
then have "A ∈ null_sets M ⟷ C ∈ null_sets P" using * by simp
moreover have "A = (λx. g (f x)) -` C ∩ space M"
by (auto simp add: A_def B_def) (meson f_meas measurable_space)
ultimately show "((λx. g (f x)) -` C ∩ space M ∈ null_sets M) ⟷ C ∈ null_sets P" by simp
qed
lemma quasi_measure_preserving_comp:
assumes "f ∈ quasi_measure_preserving M N"
"g ∈ quasi_measure_preserving N P"
shows "g o f ∈ quasi_measure_preserving M P"
unfolding comp_def using assms quasi_measure_preserving_composition by blast
lemma quasi_measure_preserving_AE:
assumes "f ∈ quasi_measure_preserving M N"
"AE x in N. P x"
shows "AE x in M. P (f x)"
proof -
obtain A where "⋀x. x ∈ space N - A ⟹ P x" "A ∈ null_sets N"
using AE_E3[OF assms(2)] by blast
define B where "B = f-`A ∩ space M"
have "B ∈ null_sets M"
unfolding B_def using quasi_measure_preservingE(2)[OF assms(1)] ‹A ∈ null_sets N› by auto
moreover have "x ∈ space M - B ⟹ P (f x)" for x
using ‹⋀x. x ∈ space N - A ⟹ P x› quasi_measure_preservingE(1)[OF assms(1)]
unfolding B_def by (metis (no_types, lifting) Diff_iff IntI measurable_space vimage_eq)
ultimately show ?thesis using AE_not_in AE_space by force
qed
lemma quasi_measure_preserving_AE':
assumes "f ∈ quasi_measure_preserving M N"
"AE x in M. P (f x)"
"{x ∈ space N. P x} ∈ sets N"
shows "AE x in N. P x"
proof -
have [measurable]: "f ∈ measurable M N" using quasi_measure_preservingE(1)[OF assms(1)] by simp
define U where "U = {x ∈ space N. ¬(P x)}"
have [measurable]: "U ∈ sets N" unfolding U_def using assms(3) by auto
have "f-`U ∩ space M = {x ∈ space M. ¬(P (f x))}"
unfolding U_def using ‹f ∈ measurable M N› by (auto, meson measurable_space)
also have "... ∈ null_sets M"
apply (subst AE_iff_null[symmetric]) using assms by auto
finally have "U ∈ null_sets N"
using quasi_measure_preservingE(2)[OF assms(1) ‹U ∈ sets N›] by auto
then show ?thesis unfolding U_def using AE_iff_null by blast
qed
text ‹The push-forward under a quasi-measure preserving map $f$ of a measure absolutely
continuous with respect to $M$ is absolutely continuous with respect to $N$.›
lemma quasi_measure_preserving_absolutely_continuous:
assumes "f ∈ quasi_measure_preserving M N"
"u ∈ borel_measurable M"
shows "absolutely_continuous N (distr (density M u) N f)"
proof -
have [measurable]: "f ∈ measurable M N" using quasi_measure_preservingE[OF assms(1)] by auto
have "S ∈ null_sets (distr (density M u) N f)" if [measurable]: "S ∈ null_sets N" for S
proof -
have [measurable]: "S ∈ sets N" using null_setsD2[OF that] by auto
have *: "AE x in N. x ∉ S"
by (metis AE_not_in that)
have "AE x in M. f x ∉ S"
by (rule quasi_measure_preserving_AE[OF _ *], simp add: assms)
then have *: "AE x in M. indicator S (f x) * u x = 0"
by force
have "emeasure (distr (density M u) N f) S = (∫⇧+x. indicator S x ∂(distr (density M u) N f))"
by auto
also have "... = (∫⇧+x. indicator S (f x) ∂(density M u))"
by (rule nn_integral_distr, auto)
also have "... = (∫⇧+x. indicator S (f x) * u x ∂M)"
by (rule nn_integral_densityR[symmetric], auto simp add: assms)
also have "... = (∫⇧+x. 0 ∂M)"
using * by (rule nn_integral_cong_AE)
finally have "emeasure (distr (density M u) N f) S = 0" by auto
then show ?thesis by auto
qed
then show ?thesis unfolding absolutely_continuous_def by auto
qed
definition measure_preserving::"'a measure ⇒ 'b measure ⇒ ('a ⇒ 'b) set"
where "measure_preserving M N
= {f ∈ measurable M N. (∀ A ∈ sets N. emeasure M (f-`A ∩ space M) = emeasure N A)}"
lemma measure_preservingE:
assumes "f ∈ measure_preserving M N"
shows "f ∈ measurable M N"
"⋀A. A ∈ sets N ⟹ emeasure M (f-`A ∩ space M) = emeasure N A"
using assms unfolding measure_preserving_def by auto
lemma measure_preservingI [intro]:
assumes "f ∈ measurable M N"
"⋀A. A ∈ sets N ⟹ emeasure M (f-`A ∩ space M) = emeasure N A"
shows "f ∈ measure_preserving M N"
using assms unfolding measure_preserving_def by auto
lemma measure_preserving_distr:
assumes "f ∈ measure_preserving M N"
shows "distr M N f = N"
proof -
let ?N2 = "distr M N f"
have "sets ?N2 = sets N" by simp
moreover have "emeasure ?N2 A = emeasure N A" if "A ∈ sets N" for A
proof -
have "emeasure ?N2 A = emeasure M (f-`A ∩ space M)"
using ‹A ∈ sets N› assms emeasure_distr measure_preservingE(1)[OF assms] by blast
then show "emeasure ?N2 A = emeasure N A"
using ‹A ∈ sets N› measure_preservingE(2)[OF assms] by auto
qed
ultimately show ?thesis by (metis measure_eqI)
qed
lemma measure_preserving_distr':
assumes "f ∈ measurable M N"
shows "f ∈ measure_preserving M (distr M N f)"
proof (rule measure_preservingI)
show "f ∈ measurable M (distr M N f)" using assms(1) by auto
show "emeasure M (f-`A ∩ space M) = emeasure (distr M N f) A" if "A ∈ sets (distr M N f)" for A
using that emeasure_distr[OF assms] by auto
qed
lemma measure_preserving_preserves_nn_integral:
assumes "T ∈ measure_preserving M N"
"f ∈ borel_measurable N"
shows "(∫⇧+x. f x ∂N) = (∫⇧+x. f (T x) ∂M)"
proof -
have "(∫⇧+x. f (T x) ∂M) = (∫⇧+y. f y ∂distr M N T)"
using assms nn_integral_distr[of T M N f, OF measure_preservingE(1)[OF assms(1)]] by simp
also have "... = (∫⇧+y. f y ∂N)"
using measure_preserving_distr[OF assms(1)] by simp
finally show ?thesis by simp
qed
lemma measure_preserving_preserves_integral:
fixes f :: "'a ⇒ 'b::{banach, second_countable_topology}"
assumes "T ∈ measure_preserving M N"
and [measurable]: "integrable N f"
shows "integrable M (λx. f(T x))" "(∫x. f x ∂N) = (∫x. f (T x) ∂M)"
proof -
have a [measurable]: "T ∈ measurable M N" by (rule measure_preservingE(1)[OF assms(1)])
have b [measurable]: "f ∈ borel_measurable N" by simp
have "distr M N T = N" using measure_preserving_distr[OF assms(1)] by simp
then have "integrable (distr M N T) f" using assms(2) by simp
then show "integrable M (λx. f(T x))" using integrable_distr_eq[OF a b] by simp
have "(∫x. f (T x) ∂M) = (∫y. f y ∂distr M N T)" using integral_distr[OF a b] by simp
then show "(∫x. f x ∂N) = (∫x. f (T x) ∂M)" using ‹distr M N T = N› by simp
qed
lemma measure_preserving_preserves_integral':
fixes f :: "'a ⇒ 'b::{banach, second_countable_topology}"
assumes "T ∈ measure_preserving M N"
and [measurable]: "integrable M (λx. f (T x))" "f ∈ borel_measurable N"
shows "integrable N f" "(∫x. f x ∂N) = (∫x. f (T x) ∂M)"
proof -
have a [measurable]: "T ∈ measurable M N" by (rule measure_preservingE(1)[OF assms(1)])
have "integrable M (λx. f(T x))" using assms(2) unfolding comp_def by auto
then have "integrable (distr M N T) f"
using integrable_distr_eq[OF a assms(3)] by simp
then show *: "integrable N f" using measure_preserving_distr[OF assms(1)] by simp
then show "(∫x. f x ∂N) = (∫x. f (T x) ∂M)"
using measure_preserving_preserves_integral[OF assms(1) *] by simp
qed
lemma id_measure_preserving:
"(λx. x) ∈ measure_preserving M M"
unfolding measure_preserving_def by auto
lemma measure_preserving_is_quasi_measure_preserving:
assumes "f ∈ measure_preserving M N"
shows "f ∈ quasi_measure_preserving M N"
using assms unfolding measure_preserving_def quasi_measure_preserving_def apply auto
by (metis null_setsD1 null_setsI, metis measurable_sets null_setsD1 null_setsI)
lemma measure_preserving_composition:
assumes "f ∈ measure_preserving M N"
"g ∈ measure_preserving N P"
shows "(λx. g(f x)) ∈ measure_preserving M P"
proof (rule measure_preservingI)
have f [measurable]: "f ∈ measurable M N" by (rule measure_preservingE(1)[OF assms(1)])
have g [measurable]: "g ∈ measurable N P" by (rule measure_preservingE(1)[OF assms(2)])
show [measurable]: "(λx. g (f x)) ∈ measurable M P" by auto
fix C assume [measurable]: "C ∈ sets P"
define B where "B = g-`C ∩ space N"
have [measurable]: "B ∈ sets N" unfolding B_def by simp
have *: "emeasure N B = emeasure P C"
unfolding B_def using measure_preservingE(2)[OF assms(2)] by simp
define A where "A = f-`B ∩ space M"
have [measurable]: "A ∈ sets M" unfolding A_def by simp
have "emeasure M A = emeasure N B"
unfolding A_def using measure_preservingE(2)[OF assms(1)] by simp
then have "emeasure M A = emeasure P C" using * by simp
moreover have "A = (λx. g(f x))-`C ∩ space M"
by (auto simp add: A_def B_def) (meson f measurable_space)
ultimately show "emeasure M ((λx. g(f x))-`C ∩ space M) = emeasure P C" by simp
qed
lemma measure_preserving_comp:
assumes "f ∈ measure_preserving M N"
"g ∈ measure_preserving N P"
shows "g o f ∈ measure_preserving M P"
unfolding o_def using measure_preserving_composition assms by blast
lemma measure_preserving_total_measure:
assumes "f ∈ measure_preserving M N"
shows "emeasure M (space M) = emeasure N (space N)"
proof -
have "f ∈ measurable M N" by (rule measure_preservingE(1)[OF assms(1)])
then have "f-`(space N) ∩ space M = space M" by (meson Int_absorb1 measurable_space subsetI vimageI)
then show "emeasure M (space M) = emeasure N (space N)"
by (metis (mono_tags, lifting) measure_preservingE(2)[OF assms(1)] sets.top)
qed
lemma measure_preserving_finite_measure:
assumes "f ∈ measure_preserving M N"
shows "finite_measure M ⟷ finite_measure N"
using measure_preserving_total_measure[OF assms]
by (metis finite_measure.emeasure_finite finite_measureI infinity_ennreal_def)
lemma measure_preserving_prob_space:
assumes "f ∈ measure_preserving M N"
shows "prob_space M ⟷ prob_space N"
using measure_preserving_total_measure[OF assms] by (metis prob_space.emeasure_space_1 prob_spaceI)
locale qmpt = sigma_finite_measure +
fixes T
assumes Tqm: "T ∈ quasi_measure_preserving M M"
locale mpt = qmpt +
assumes Tm: "T ∈ measure_preserving M M"
locale fmpt = mpt + finite_measure
locale pmpt = fmpt + prob_space
lemma qmpt_I:
assumes "sigma_finite_measure M"
"T ∈ measurable M M"
"⋀A. A ∈ sets M ⟹ ((T-`A ∩ space M) ∈ null_sets M) ⟷ (A ∈ null_sets M)"
shows "qmpt M T"
unfolding qmpt_def qmpt_axioms_def quasi_measure_preserving_def
by (auto simp add: assms)
lemma mpt_I:
assumes "sigma_finite_measure M"
"T ∈ measurable M M"
"⋀A. A ∈ sets M ⟹ emeasure M (T-`A ∩ space M) = emeasure M A"
shows "mpt M T"
proof -
have *: "T ∈ measure_preserving M M"
by (rule measure_preservingI[OF assms(2) assms(3)])
then have **: "T ∈ quasi_measure_preserving M M"
using measure_preserving_is_quasi_measure_preserving by auto
show "mpt M T"
unfolding mpt_def qmpt_def qmpt_axioms_def mpt_axioms_def using * ** assms(1) by auto
qed
lemma fmpt_I:
assumes "finite_measure M"
"T ∈ measurable M M"
"⋀A. A ∈ sets M ⟹ emeasure M (T-`A ∩ space M) = emeasure M A"
shows "fmpt M T"
proof -
have *: "T ∈ measure_preserving M M"
by (rule measure_preservingI[OF assms(2) assms(3)])
then have **: "T ∈ quasi_measure_preserving M M"
using measure_preserving_is_quasi_measure_preserving by auto
show "fmpt M T"
unfolding fmpt_def mpt_def qmpt_def mpt_axioms_def qmpt_axioms_def
using * ** assms(1) finite_measure_def by auto
qed
lemma pmpt_I:
assumes "prob_space M"
"T ∈ measurable M M"
"⋀A. A ∈ sets M ⟹ emeasure M (T-`A ∩ space M) = emeasure M A"
shows "pmpt M T"
proof -
have *: "T ∈ measure_preserving M M"
by (rule measure_preservingI[OF assms(2) assms(3)])
then have **: "T ∈ quasi_measure_preserving M M"
using measure_preserving_is_quasi_measure_preserving by auto
show "pmpt M T"
unfolding pmpt_def fmpt_def mpt_def qmpt_def mpt_axioms_def qmpt_axioms_def
using * ** assms(1) prob_space_imp_sigma_finite prob_space.finite_measure by auto
qed
subsection ‹Examples›
lemma fmpt_null_space:
assumes "emeasure M (space M) = 0"
"T ∈ measurable M M"
shows "fmpt M T"
apply (rule fmpt_I)
apply (auto simp add: assms finite_measureI)
apply (metis assms emeasure_eq_0 measurable_sets sets.sets_into_space sets.top)
done
lemma fmpt_empty_space:
assumes "space M = {}"
shows "fmpt M T"
by (rule fmpt_null_space, auto simp add: assms measurable_empty_iff)
text ‹Translations are measure-preserving›
lemma mpt_translation:
fixes c :: "'a::euclidean_space"
shows "mpt lborel (λx. x + c)"
proof (rule mpt_I, auto simp add: lborel.sigma_finite_measure_axioms)
fix A::"'a set" assume [measurable]: "A ∈ sets borel"
have "emeasure lborel ((λx. x + c) -` A) = emeasure lborel ((((+))c)-`A)" by (meson add.commute)
also have "... = emeasure lborel ((((+))c)-`A ∩ space lborel)" by simp
also have "... = emeasure (distr lborel borel ((+) c)) A" by (rule emeasure_distr[symmetric], auto)
also have "... = emeasure lborel A" using lborel_distr_plus[of c] by simp
finally show "emeasure lborel ((λx. x + c) -` A) = emeasure lborel A" by simp
qed
text ‹Skew products are fibered maps of the form $(x,y)\mapsto (Tx, U(x,y))$. If the base map
and the fiber maps are all measure preserving, then so is the skew product.›
lemma pair_measure_null_product:
assumes "emeasure M (space M) = 0"
shows "emeasure (M ⨂⇩M N) (space (M ⨂⇩M N)) = 0"
proof -
have "(∫⇧+x. (∫⇧+y. indicator X (x,y) ∂N) ∂M) = 0" for X
proof -
have "(∫⇧+x. (∫⇧+y. indicator X (x,y) ∂N) ∂M) = (∫⇧+x. 0 ∂M)"
by (intro nn_integral_cong_AE emeasure_0_AE[OF assms])
then show ?thesis by auto
qed
then have "M ⨂⇩M N = measure_of (space M × space N)
{a × b | a b. a ∈ sets M ∧ b ∈ sets N}
(λX. 0)"
unfolding pair_measure_def by auto
then show ?thesis by (simp add: emeasure_sigma)
qed
lemma mpt_skew_product:
assumes "mpt M T"
"AE x in M. mpt N (U x)"
and [measurable]: "(λ(x,y). U x y) ∈ measurable (M ⨂⇩M N) N"
shows "mpt (M ⨂⇩M N) (λ(x,y). (T x, U x y))"
proof (cases)
assume H: "emeasure M (space M) = 0"
then have *: "emeasure (M ⨂⇩M N) (space (M ⨂⇩M N)) = 0"
using pair_measure_null_product by auto
have [measurable]: "T ∈ measurable M M"
using assms(1) unfolding mpt_def qmpt_def qmpt_axioms_def quasi_measure_preserving_def by auto
then have [measurable]: "(λ(x, y). (T x, U x y)) ∈ measurable (M ⨂⇩M N) (M ⨂⇩M N)" by auto
with fmpt_null_space[OF *] show ?thesis by (simp add: fmpt.axioms(1))
next
assume "¬(emeasure M (space M) = 0)"
show ?thesis
proof (rule mpt_I)
have "sigma_finite_measure M" using assms(1) unfolding mpt_def qmpt_def by auto
then interpret M: sigma_finite_measure M .
have "∃p. ¬ almost_everywhere M p"
by (metis (lifting) AE_E ‹emeasure M (space M) ≠ 0› emeasure_eq_AE emeasure_notin_sets)
then have "∃x. mpt N (U x)" using assms(2) ‹¬(emeasure M (space M) = 0)›
by (metis (full_types) ‹AE x in M. mpt N (U x)› eventually_mono)
then have "sigma_finite_measure N" unfolding mpt_def qmpt_def by auto
then interpret N: sigma_finite_measure N .
show "sigma_finite_measure (M ⨂⇩M N)"
by (rule sigma_finite_pair_measure) standard+
have [measurable]: "T ∈ measurable M M"
using assms(1) unfolding mpt_def qmpt_def qmpt_axioms_def quasi_measure_preserving_def by auto
show [measurable]: "(λ(x, y). (T x, U x y)) ∈ measurable (M ⨂⇩M N) (M ⨂⇩M N)" by auto
have "T ∈ measure_preserving M M" using assms(1) by (simp add: mpt.Tm)
fix A assume [measurable]: "A ∈ sets (M ⨂⇩M N)"
then have [measurable]: "(λ (x,y). (indicator A (x,y))::ennreal) ∈ borel_measurable (M ⨂⇩M N)" by auto
then have [measurable]: "(λx. ∫⇧+ y. indicator A (x, y) ∂N) ∈ borel_measurable M"
by simp
define B where "B = (λ(x, y). (T x, U x y)) -` A ∩ space (M ⨂⇩M N)"
then have [measurable]: "B ∈ sets (M ⨂⇩M N)" by auto
have "(∫⇧+y. indicator B (x,y) ∂N) = (∫⇧+y. indicator A (T x, y) ∂N)" if "x ∈ space M" "mpt N (U x)" for x
proof -
have "T x ∈ space M" by (meson ‹T ∈ measurable M M› ‹x ∈ space M› measurable_space)
then have 1: "(λy. (indicator A (T x, y))::ennreal) ∈ borel_measurable N" using ‹A ∈ sets (M ⨂⇩M N)› by auto
have 2: "⋀y. ((indicator B (x, y))::ennreal) = indicator A (T x, U x y) * indicator (space M) x * indicator (space N) y"
unfolding B_def by (simp add: indicator_def space_pair_measure)
have 3: "U x ∈ measure_preserving N N" using assms(2) that(2) by (simp add: mpt.Tm)
have "(∫⇧+y. indicator B (x,y) ∂N) = (∫⇧+y. indicator A (T x, U x y) ∂N)"
using 2 by (intro nn_integral_cong_simp) (auto simp add: indicator_def ‹x ∈ space M›)
also have "... = (∫⇧+y. indicator A (T x, y) ∂N)"
by (rule measure_preserving_preserves_nn_integral[OF 3, symmetric], metis 1)
finally show ?thesis by simp
qed
then have *: "AE x in M. (∫⇧+y. indicator B (x,y) ∂N) = (∫⇧+y. indicator A (T x, y) ∂N)"
using assms(2) by auto
have "emeasure (M ⨂⇩M N) B = (∫⇧+ x. (∫⇧+y. indicator B (x,y) ∂N) ∂M)"
using ‹B ∈ sets (M ⨂⇩M N)› ‹sigma_finite_measure N› sigma_finite_measure.emeasure_pair_measure by fastforce
also have "... = (∫⇧+ x. (∫⇧+y. indicator A (T x, y) ∂N) ∂M)"
by (intro nn_integral_cong_AE *)
also have "... = (∫⇧+ x. (∫⇧+y. indicator A (x, y) ∂N) ∂M)"
by (rule measure_preserving_preserves_nn_integral[OF ‹T ∈ measure_preserving M M›, symmetric]) auto
also have "... = emeasure (M ⨂⇩M N) A"
by (simp add: ‹sigma_finite_measure N› sigma_finite_measure.emeasure_pair_measure)
finally show "emeasure (M ⨂⇩M N) ((λ(x, y). (T x, U x y)) -` A ∩ space (M ⨂⇩M N)) = emeasure (M ⨂⇩M N) A"
unfolding B_def by simp
qed
qed
lemma mpt_skew_product_real:
fixes f::"'a ⇒ 'b::euclidean_space"
assumes "mpt M T" and [measurable]: "f ∈ borel_measurable M"
shows "mpt (M ⨂⇩M lborel) (λ(x,y). (T x, y + f x))"
by (rule mpt_skew_product, auto simp add: mpt_translation assms(1))
subsection ‹Preimages restricted to $space M$›
context qmpt begin
text ‹One is constantly led to take preimages of sets and restrict them to
\verb+space M+, where the dynamics takes place. We introduce a shortcut for this notion.›
definition vimage_restr :: "('a ⇒ 'a) ⇒ 'a set ⇒ 'a set" (infixr "--`" 90)
where
"f --` A ≡ f-` (A ∩ space M) ∩ space M"
lemma vrestr_eq [simp]:
"a ∈ f--` A ⟷ a ∈ space M ∧ f a ∈ A ∩ space M"
unfolding vimage_restr_def by auto
lemma vrestr_intersec [simp]:
"f--` (A ∩ B) = (f--`A) ∩ (f--` B)"
using vimage_restr_def by auto
lemma vrestr_union [simp]:
"f--` (A ∪ B) = f--`A ∪ f--`B"
using vimage_restr_def by auto
lemma vrestr_difference [simp]:
"f--`(A-B) = f--`A - f--`B"
using vimage_restr_def by auto
lemma vrestr_inclusion:
"A ⊆ B ⟹ f--`A ⊆ f--`B"
using vimage_restr_def by auto
lemma vrestr_Union [simp]:
"f --` (⋃A) = (⋃X∈A. f --` X)"
using vimage_restr_def by auto
lemma vrestr_UN [simp]:
"f --` (⋃x∈A. B x) = (⋃x∈A. f --` B x)"
using vimage_restr_def by auto
lemma vrestr_Inter [simp]:
assumes "A ≠ {}"
shows "f --` (⋂A) = (⋂X∈A. f --` X)"
using vimage_restr_def assms by auto
lemma vrestr_INT [simp]:
assumes "A ≠ {}"
shows "f --` (⋂x∈A. B x) = (⋂x∈A. f --` B x)"
using vimage_restr_def assms by auto
lemma vrestr_empty [simp]:
"f--`{} = {}"
using vimage_restr_def by auto
lemma vrestr_sym_diff [simp]:
"f--`(A Δ B) = (f--`A) Δ (f--`B)"
by auto
lemma vrestr_image:
assumes "x ∈ f--`A"
shows "x ∈ space M" "f x ∈ space M" "f x ∈ A"
using assms unfolding vimage_restr_def by auto
lemma vrestr_intersec_in_space:
assumes "A ∈ sets M" "B ∈ sets M"
shows "A ∩ f--`B = A ∩ f-`B"
unfolding vimage_restr_def using assms sets.sets_into_space by auto
lemma vrestr_compose:
assumes "g ∈ measurable M M"
shows "(λ x. f(g x))--` A = g--` (f--` A)"
proof -
define B where "B = A ∩ space M"
have "(λ x. f(g x))--` A = (λ x. f(g x)) -` B ∩ space M"
using B_def vimage_restr_def by blast
moreover have "(λ x. f(g x)) -` B ∩ space M = g-` (f-` B ∩ space M) ∩ space M"
using measurable_space[OF ‹g ∈ measurable M M›] by auto
moreover have "g-` (f-` B ∩ space M) ∩ space M = g--` (f--` A)"
using B_def vimage_restr_def by simp
ultimately show ?thesis by auto
qed
lemma vrestr_comp:
assumes "g ∈ measurable M M"
shows "(f o g)--` A = g--` (f--` A)"
proof -
have "f o g = (λ x. f(g x))" by auto
then have "(f o g)--` A = (λ x. f(g x))--` A" by auto
moreover have "(λ x. f(g x))--` A = g--` (f--` A)" using vrestr_compose assms by auto
ultimately show ?thesis by simp
qed
lemma vrestr_of_set:
assumes "g ∈ measurable M M"
shows "A ∈ sets M ⟹ g--`A = g-`A ∩ space M"
by (simp add: vimage_restr_def)
lemma vrestr_meas [measurable (raw)]:
assumes "g ∈ measurable M M"
"A ∈ sets M"
shows "g--`A ∈ sets M"
using assms vimage_restr_def by auto
lemma vrestr_same_emeasure_f:
assumes "f ∈ measure_preserving M M"
"A ∈ sets M"
shows "emeasure M (f--`A) = emeasure M A"
by (metis (mono_tags, lifting) assms measure_preserving_def mem_Collect_eq sets.Int_space_eq2 vimage_restr_def)
lemma vrestr_same_measure_f:
assumes "f ∈ measure_preserving M M"
"A ∈ sets M"
shows "measure M (f--`A) = measure M A"
proof -
have "measure M (f--`A) = enn2real (emeasure M (f--`A))" by (simp add: Sigma_Algebra.measure_def)
also have "... = enn2real (emeasure M A)" using vrestr_same_emeasure_f[OF assms] by simp
also have "... = measure M A" by (simp add: Sigma_Algebra.measure_def)
finally show "measure M (f--` A) = measure M A" by simp
qed
subsection ‹Basic properties of qmpt›
lemma T_meas [measurable (raw)]:
"T ∈ measurable M M"
by (rule quasi_measure_preservingE(1)[OF Tqm])
lemma Tn_quasi_measure_preserving:
"T^^n ∈ quasi_measure_preserving M M"
proof (induction n)
case 0
show ?case using id_quasi_measure_preserving by simp
next
case (Suc n)
then show ?case using Tqm quasi_measure_preserving_comp by (metis funpow_Suc_right)
qed
lemma Tn_meas [measurable (raw)]:
"T^^n ∈ measurable M M"
by (rule quasi_measure_preservingE(1)[OF Tn_quasi_measure_preserving])
lemma T_vrestr_meas [measurable]:
assumes "A ∈ sets M"
shows "T--` A ∈ sets M"
"(T^^n)--` A ∈ sets M"
by (auto simp add: vrestr_meas assms)
text ‹We state the next lemma both with $T^0$ and with $id$ as sometimes the simplifier
simplifies $T^0$ to $id$ before applying the first instance of the lemma.›
lemma T_vrestr_0 [simp]:
assumes "A ∈ sets M"
shows "(T^^0)--`A = A"
"id--`A = A"
using sets.sets_into_space[OF assms] by auto
lemma T_vrestr_composed:
assumes "A ∈ sets M"
shows "(T^^n)--` (T^^m)--` A = (T^^(n+m))--` A"
"T--` (T^^m)--` A = (T^^(m+1))--` A"
"(T^^m)--` T--` A = (T^^(m+1))--` A"
proof -
show "(T^^n)--` (T^^m)--` A = (T^^(n+m))--` A"
by (simp add: Tn_meas funpow_add add.commute vrestr_comp)
show "T--` (T^^m)--` A = (T^^(m+1))--` A"
by (metis Suc_eq_plus1 T_meas funpow_Suc_right vrestr_comp)
show "(T^^m)--` T--` A = (T^^(m+1))--` A"
by (simp add: Tn_meas vrestr_comp)
qed
text ‹In the next two lemmas, we give measurability statements that show up all the time
for the usual preimage.›
lemma T_intersec_meas [measurable]:
assumes [measurable]: "A ∈ sets M" "B ∈ sets M"
shows "A ∩ T-`B ∈ sets M"
"A ∩ (T^^n)-`B ∈ sets M"
"T-`A ∩ B ∈ sets M"
"(T^^n)-`A ∩ B ∈ sets M"
"A ∩ (T ∘ T ^^ n) -` B ∈ sets M"
"(T ∘ T ^^ n) -` A ∩ B ∈ sets M"
by (metis T_meas Tn_meas assms(1) assms(2) measurable_comp sets.Int inf_commute
vrestr_intersec_in_space vrestr_meas)+
lemma T_diff_meas [measurable]:
assumes [measurable]: "A ∈ sets M" "B ∈ sets M"
shows "A - T-`B ∈ sets M"
"A - (T^^n)-`B ∈ sets M"
proof -
have "A - T-`B = A ∩ space M - (T-`B ∩ space M)"
using sets.sets_into_space[OF assms(1)] by auto
then show "A - T-`B ∈ sets M" by auto
have "A - (T^^n)-`B = A ∩ space M - ((T^^n)-`B ∩ space M)"
using sets.sets_into_space[OF assms(1)] by auto
then show "A - (T^^n)-`B ∈ sets M" by auto
qed
lemma T_spaceM_stable [simp]:
assumes "x ∈ space M"
shows "T x ∈ space M"
"(T^^n) x ∈ space M"
proof -
show "T x ∈ space M" by (meson measurable_space T_meas measurable_def assms)
show "(T^^n) x ∈ space M" by (meson measurable_space Tn_meas measurable_def assms)
qed
lemma T_quasi_preserves_null:
assumes "A ∈ sets M"
shows "A ∈ null_sets M ⟷ T--` A ∈ null_sets M"
"A ∈ null_sets M ⟷ (T^^n)--` A ∈ null_sets M"
using Tqm Tn_quasi_measure_preserving unfolding quasi_measure_preserving_def
by (auto simp add: assms vimage_restr_def)
lemma T_quasi_preserves:
assumes "A ∈ sets M"
shows "emeasure M A = 0 ⟷ emeasure M (T--` A) = 0"
"emeasure M A = 0 ⟷ emeasure M ((T^^n)--` A) = 0"
using T_quasi_preserves_null[OF assms] T_vrestr_meas assms by blast+
lemma T_quasi_preserves_null2:
assumes "A ∈ null_sets M"
shows "T--` A ∈ null_sets M"
"(T^^n)--` A ∈ null_sets M"
using T_quasi_preserves_null[OF null_setsD2[OF assms]] assms by auto
lemma T_composition_borel [measurable]:
assumes "f ∈ borel_measurable M"
shows "(λx. f(T x)) ∈ borel_measurable M" "(λx. f((T^^k) x)) ∈ borel_measurable M"
using T_meas Tn_meas assms measurable_compose by auto
lemma T_AE_iterates:
assumes "AE x in M. P x"
shows "AE x in M. ∀n. P ((T^^n) x)"
proof -
have "AE x in M. P ((T^^n) x)" for n
by (rule quasi_measure_preserving_AE[OF Tn_quasi_measure_preserving[of n] assms])
then show ?thesis unfolding AE_all_countable by simp
qed
lemma qmpt_power:
"qmpt M (T^^n)"
by (standard, simp add: Tn_quasi_measure_preserving)
lemma T_Tn_T_compose:
"T ((T^^n) x) = (T^^(Suc n)) x"
"(T^^n) (T x) = (T^^(Suc n)) x"
by (auto simp add: funpow_swap1)
lemma (in qmpt) qmpt_density:
assumes [measurable]: "h ∈ borel_measurable M"
and "AE x in M. h x ≠ 0" "AE x in M. h x ≠ ∞"
shows "qmpt (density M h) T"
proof -
interpret A: sigma_finite_measure "density M h"
apply (subst sigma_finite_iff_density_finite) using assms by auto
show ?thesis
apply (standard) apply (rule quasi_measure_preservingI)
unfolding null_sets_density[OF ‹h ∈ borel_measurable M› ‹AE x in M. h x ≠ 0›] sets_density space_density
using quasi_measure_preservingE(2)[OF Tqm] by auto
qed
end
subsection ‹Basic properties of mpt›
context mpt
begin
(* Iterates of a measure-preserving map are measure-preserving, by induction
   using closure under composition. *)
lemma Tn_measure_preserving:
"T^^n ∈ measure_preserving M M"
proof (induction n)
case (Suc n)
then show ?case using Tm measure_preserving_comp by (metis funpow_Suc_right)
qed (simp add: id_measure_preserving)
(* Composing with T preserves integrability and the value of Bochner integrals. *)
lemma T_integral_preserving:
fixes f :: "'a ⇒ 'b::{banach, second_countable_topology}"
assumes "integrable M f"
shows "integrable M (λx. f(T x))" "(∫x. f(T x) ∂M) = (∫x. f x ∂M)"
using measure_preserving_preserves_integral[OF Tm assms] by auto
(* Same statement for the iterates of T. *)
lemma Tn_integral_preserving:
fixes f :: "'a ⇒ 'b::{banach, second_countable_topology}"
assumes "integrable M f"
shows "integrable M (λx. f((T^^n) x))" "(∫x. f((T^^n) x) ∂M) = (∫x. f x ∂M)"
using measure_preserving_preserves_integral[OF Tn_measure_preserving assms] by auto
(* For nonnegative (ennreal) integrals, no integrability assumption is needed. *)
lemma T_nn_integral_preserving:
fixes f :: "'a ⇒ ennreal"
assumes "f ∈ borel_measurable M"
shows "(∫⇧+x. f(T x) ∂M) = (∫⇧+x. f x ∂M)"
using measure_preserving_preserves_nn_integral[OF Tm assms] by auto
lemma Tn_nn_integral_preserving:
fixes f :: "'a ⇒ ennreal"
assumes "f ∈ borel_measurable M"
shows "(∫⇧+x. f((T^^n) x) ∂M) = (∫⇧+x. f x ∂M)"
using measure_preserving_preserves_nn_integral[OF Tn_measure_preserving assms(1)] by auto
(* Any iterate of an mpt is again an mpt. *)
lemma mpt_power:
"mpt M (T^^n)"
by (standard, simp_all add: Tn_quasi_measure_preserving Tn_measure_preserving)
(* Restricted preimages under T and its iterates keep the same emeasure. *)
lemma T_vrestr_same_emeasure:
assumes "A ∈ sets M"
shows "emeasure M (T--` A) = emeasure M A"
"emeasure M ((T ^^ n)--`A) = emeasure M A"
by (auto simp add: vrestr_same_emeasure_f Tm Tn_measure_preserving assms)
(* Same statement for the real-valued measure. *)
lemma T_vrestr_same_measure:
assumes "A ∈ sets M"
shows "measure M (T--` A) = measure M A"
"measure M ((T ^^ n)--`A) = measure M A"
by (auto simp add: vrestr_same_measure_f Tm Tn_measure_preserving assms)
(* Any iterate of an fmpt is again an fmpt. *)
lemma (in fmpt) fmpt_power:
"fmpt M (T^^n)"
by (standard, simp_all add: Tn_quasi_measure_preserving Tn_measure_preserving)
end
subsection ‹Birkhoff sums›
text ‹Birkhoff sums, obtained by summing a function along the orbit of a map, are basic objects
to be understood in ergodic theory.›
context qmpt
begin
(* Birkhoff sum: S_n f (x) = f x + f (T x) + ... + f (T^(n-1) x). *)
definition birkhoff_sum::"('a ⇒ 'b::comm_monoid_add) ⇒ nat ⇒ 'a ⇒ 'b"
where "birkhoff_sum f n x = (∑i∈{..<n}. f((T^^i)x))"
(* Birkhoff sums of a Borel function are Borel. *)
lemma birkhoff_sum_meas [measurable]:
fixes f::"'a ⇒ 'b::{second_countable_topology, topological_comm_monoid_add}"
assumes "f ∈ borel_measurable M"
shows "birkhoff_sum f n ∈ borel_measurable M"
proof -
define F where "F = (λi x. f((T^^i)x))"
have "⋀i. F i ∈ borel_measurable M" using assms F_def by auto
then have "(λx. (∑i<n. F i x)) ∈ borel_measurable M" by measurable
then have "(λx. birkhoff_sum f n x) ∈ borel_measurable M" unfolding birkhoff_sum_def F_def by auto
then show ?thesis by simp
qed
(* Degenerate cases n = 0 and n = 1 (the latter in both numeral and Suc form). *)
lemma birkhoff_sum_1 [simp]:
"birkhoff_sum f 0 x = 0"
"birkhoff_sum f 1 x = f x"
"birkhoff_sum f (Suc 0) x = f x"
unfolding birkhoff_sum_def by auto
(* The fundamental cocycle relation: S_{n+m} f = S_n f + (S_m f) ∘ T^n. *)
lemma birkhoff_sum_cocycle:
"birkhoff_sum f (n+m) x = birkhoff_sum f n x + birkhoff_sum f m ((T^^n)x)"
proof -
have "(∑i<m. f ((T ^^ i) ((T ^^ n) x))) = (∑i<m. f ((T ^^ (i+n)) x))" by (simp add: funpow_add)
also have "... = (∑j∈{n..< m+n}. f ((T ^^j) x))"
using atLeast0LessThan sum.shift_bounds_nat_ivl[where ?g = "λj. f((T^^j)x)" and ?k = n and ?m = 0 and ?n = m, symmetric]
add.commute add.left_neutral by auto
finally have *: "birkhoff_sum f m ((T^^n)x) = (∑j∈{n..< m+n}. f ((T ^^j) x))" unfolding birkhoff_sum_def by auto
have "birkhoff_sum f (n+m) x = (∑i<n. f((T^^i)x)) + (∑i∈{n..<m+n}. f((T^^i)x))"
unfolding birkhoff_sum_def by (metis add.commute add.right_neutral atLeast0LessThan le_add2 sum.atLeastLessThan_concat)
also have "... = birkhoff_sum f n x + (∑i∈{n..<m+n}. f((T^^i)x))" unfolding birkhoff_sum_def by simp
finally show ?thesis using * by simp
qed
(* Birkhoff summation is monotone, dominated by the sum of norms, and linear. *)
lemma birkhoff_sum_mono:
fixes f g::"_ ⇒ real"
assumes "⋀x. f x ≤ g x"
shows "birkhoff_sum f n x ≤ birkhoff_sum g n x"
unfolding birkhoff_sum_def by (simp add: assms sum_mono)
lemma birkhoff_sum_abs:
fixes f::"_ ⇒ 'b::real_normed_vector"
shows "norm(birkhoff_sum f n x) ≤ birkhoff_sum (λx. norm(f x)) n x"
unfolding birkhoff_sum_def using norm_sum by auto
lemma birkhoff_sum_add:
"birkhoff_sum (λx. f x + g x) n x = birkhoff_sum f n x + birkhoff_sum g n x"
unfolding birkhoff_sum_def by (simp add: sum.distrib)
lemma birkhoff_sum_diff:
fixes f g::"_ ⇒ real"
shows "birkhoff_sum (λx. f x - g x) n x = birkhoff_sum f n x - birkhoff_sum g n x"
unfolding birkhoff_sum_def by (simp add: sum_subtractf)
lemma birkhoff_sum_cmult:
fixes f::"_ ⇒ real"
shows "birkhoff_sum (λx. c * f x) n x = c * birkhoff_sum f n x"
unfolding birkhoff_sum_def by (simp add: sum_distrib_left)
(* Iterating the real skew product accumulates the Birkhoff sum in the fiber. *)
lemma skew_product_real_iterates:
fixes f::"'a ⇒ real"
shows "((λ(x,y). (T x, y + f x))^^n) (x,y) = ((T^^n) x, y + birkhoff_sum f n x)"
apply (induction n)
apply (auto)
apply (metis (no_types, lifting) Suc_eq_plus1 birkhoff_sum_cocycle qmpt.birkhoff_sum_1(2) qmpt_axioms)
done
end
(* For an integrable f, the Birkhoff sum is integrable and its integral is n
   times that of f: each of the n terms has the same integral, by measure
   preservation of the iterates. *)
lemma (in mpt) birkhoff_sum_integral:
fixes f :: "'a ⇒ 'b::{banach, second_countable_topology}"
assumes [measurable]: "integrable M f"
shows "integrable M (birkhoff_sum f n)" "(∫x. birkhoff_sum f n x ∂M) = n *⇩R (∫x. f x ∂M)"
proof -
have a: "⋀k. integrable M (λx. f((T^^k) x))"
using Tn_integral_preserving(1) assms by blast
then have "integrable M (λx. ∑k∈{..<n}. f((T^^k) x))" by simp
then have "integrable M (λx. birkhoff_sum f n x)" unfolding birkhoff_sum_def by auto
then show "integrable M (birkhoff_sum f n)" by simp
have b: "⋀k. (∫x. f((T^^k)x) ∂M) = (∫x. f x ∂M)"
using Tn_integral_preserving(2) assms by blast
have "(∫x. birkhoff_sum f n x ∂M) = (∫x. (∑k∈{..<n}. f((T^^k) x)) ∂M)"
unfolding birkhoff_sum_def by blast
also have "... = (∑k∈{..<n}. (∫x. f((T^^k) x) ∂M))"
by (rule Bochner_Integration.integral_sum, simp add: a)
also have "... = (∑k∈{..<n}. (∫x. f x ∂M))" using b by simp
also have "... = n *⇩R (∫x. f x ∂M)" by (simp add: sum_constant_scaleR)
finally show "(∫x. birkhoff_sum f n x ∂M) = n *⇩R (∫x. f x ∂M)" by simp
qed
(* Nonnegative-integral analogue: no integrability assumption is needed, the
   identity holds in ennreal (both sides may be ∞). *)
lemma (in mpt) birkhoff_sum_nn_integral:
fixes f :: "'a ⇒ ennreal"
assumes [measurable]: "f ∈ borel_measurable M" and pos: "⋀x. f x ≥ 0"
shows "(∫⇧+x. birkhoff_sum f n x ∂M) = n * (∫⇧+x. f x ∂M)"
proof -
have [measurable]: "⋀k. (λx. f((T^^k)x)) ∈ borel_measurable M" by simp
have posk: "⋀k x. f((T^^k)x) ≥ 0" using pos by simp
have b: "⋀k. (∫⇧+x. f((T^^k)x) ∂M) = (∫⇧+x. f x ∂M)"
using Tn_nn_integral_preserving assms by blast
have "(∫⇧+x. birkhoff_sum f n x ∂M) = (∫⇧+x. (∑k∈{..<n}. f((T^^k) x)) ∂M)"
unfolding birkhoff_sum_def by blast
also have "... = (∑k∈{..<n}. (∫⇧+x. f((T^^k) x) ∂M))"
by (rule nn_integral_sum, auto simp add: posk)
also have "... = (∑k∈{..<n}. (∫⇧+x. f x ∂M))" using b by simp
also have "... = n * (∫⇧+x. f x ∂M)" by simp
finally show "(∫⇧+x. birkhoff_sum f n x ∂M) = n * (∫⇧+x. f x ∂M)" by simp
qed
subsection ‹Inverse map›
context qmpt begin
(* A qmpt is invertible when T is a bijection with measurable inverse. *)
definition
"invertible_qmpt ≡ (bij T ∧ inv T ∈ measurable M M)"
(* The inverse map of the dynamics. *)
definition
"Tinv ≡ inv T"
(* Pulling a measurable set back by Tinv and then by T recovers the set
   (everything restricted to space M). *)
lemma T_Tinv_of_set:
assumes "invertible_qmpt"
"A ∈ sets M"
shows "T-`(Tinv-`A ∩ space M) ∩ space M = A"
using assms sets.sets_into_space unfolding Tinv_def invertible_qmpt_def
apply (auto simp add: bij_betw_def)
using T_spaceM_stable(1) by blast
(* The inverse of an invertible qmpt is quasi-measure-preserving: both
   directions of the null-set equivalence are transported through T. *)
lemma Tinv_quasi_measure_preserving:
assumes "invertible_qmpt"
shows "Tinv ∈ quasi_measure_preserving M M"
proof (rule quasi_measure_preservingI, auto)
fix A assume [measurable]: "A ∈ sets M" "Tinv -` A ∩ space M ∈ null_sets M"
then have "T-`(Tinv -` A ∩ space M) ∩ space M ∈ null_sets M"
by (metis T_quasi_preserves_null2(1) null_sets.Int_space_eq2 vimage_restr_def)
then show "A ∈ null_sets M"
using T_Tinv_of_set[OF assms ‹A ∈ sets M›] by auto
next
show [measurable]: "Tinv ∈ measurable M M"
using assms unfolding Tinv_def invertible_qmpt_def by blast
fix A assume [measurable]: "A ∈ sets M" "A ∈ null_sets M"
then have "T-`(Tinv -` A ∩ space M) ∩ space M ∈ null_sets M"
using T_Tinv_of_set[OF assms ‹A ∈ sets M›] by auto
moreover have [measurable]: "Tinv-`A ∩ space M ∈ sets M"
by auto
ultimately show "Tinv -` A ∩ space M ∈ null_sets M"
using T_meas T_quasi_preserves_null(1) vrestr_of_set by presburger
qed
(* Hence the inverse defines a qmpt on the same measure space. *)
lemma Tinv_qmpt:
assumes "invertible_qmpt"
shows "qmpt M Tinv"
unfolding qmpt_def qmpt_axioms_def using Tinv_quasi_measure_preserving[OF assms]
by (simp add: sigma_finite_measure_axioms)
end
(* The inverse of an invertible mpt is measure-preserving: the emeasure of the
   Tinv-preimage is recovered by pulling back once more through T. *)
lemma (in mpt) Tinv_measure_preserving:
assumes "invertible_qmpt"
shows "Tinv ∈ measure_preserving M M"
proof (rule measure_preservingI)
show [measurable]: "Tinv ∈ measurable M M"
using assms unfolding Tinv_def invertible_qmpt_def by blast
fix A assume [measurable]: "A ∈ sets M"
have "A = T-`(Tinv -` A ∩ space M) ∩ space M"
using T_Tinv_of_set[OF assms ‹A ∈ sets M›] by auto
then show "emeasure M (Tinv -` A ∩ space M) = emeasure M A"
by (metis T_vrestr_same_emeasure(1) ‹A ∈ sets M› ‹Tinv ∈ M →⇩M M› measurable_sets sets.Int_space_eq2 vimage_restr_def)
qed
(* The inverse of an invertible mpt is an mpt. *)
lemma (in mpt) Tinv_mpt:
assumes "invertible_qmpt"
shows "mpt M Tinv"
unfolding mpt_def mpt_axioms_def using Tinv_qmpt[OF assms] Tinv_measure_preserving[OF assms] by auto
(* The inverse of an invertible fmpt is an fmpt. *)
lemma (in fmpt) Tinv_fmpt:
assumes "invertible_qmpt"
shows "fmpt M Tinv"
unfolding fmpt_def using Tinv_mpt[OF assms] by (simp add: finite_measure_axioms)
(* The inverse of an invertible pmpt is a pmpt.
   NOTE(review): the lemma is named Tinv_fmpt although it concludes pmpt and
   shadows fmpt.Tinv_fmpt inside the pmpt locale — presumably intended as
   Tinv_pmpt; renaming would change the interface, so it is left as is. *)
lemma (in pmpt) Tinv_fmpt:
assumes "invertible_qmpt"
shows "pmpt M Tinv"
unfolding pmpt_def using Tinv_fmpt[OF assms] by (simp add: prob_space_axioms)
subsection ‹Factors›
text ‹Factors of a system are quotients of this system, i.e., systems that can be obtained by
a projection, forgetting some part of the dynamics. It is sometimes possible to transfer a result
from a factor to the original system, making it possible to prove theorems by reduction to a
simpler situation.
The dual notion, extension, is equally important and useful. We only mention factors below, as
the results for extension readily follow by considering the original system as a factor of its
extension.
In this paragraph, we define factors both in the qmpt and mpt categories, and prove their basic
properties.
›
(* A factor of a qmpt: a quasi-measure-preserving projection that intertwines
   the two dynamics almost everywhere, with a qmpt structure on the target. *)
definition (in qmpt) qmpt_factor::"('a ⇒ 'b) ⇒ ('b measure) ⇒ ('b ⇒ 'b) ⇒ bool"
where "qmpt_factor proj M2 T2 =
((proj ∈ quasi_measure_preserving M M2) ∧ (AE x in M. proj (T x) = T2 (proj x)) ∧ qmpt M2 T2)"
(* Elimination rule: unpack the three components of the factor definition. *)
lemma (in qmpt) qmpt_factorE:
assumes "qmpt_factor proj M2 T2"
shows "proj ∈ quasi_measure_preserving M M2"
"AE x in M. proj (T x) = T2 (proj x)"
"qmpt M2 T2"
using assms unfolding qmpt_factor_def by auto
(* The a.e. intertwining relation propagates to all iterates simultaneously:
   first upgrade it along the orbit with T_AE_iterates, then chain the single
   steps by induction on n. *)
lemma (in qmpt) qmpt_factor_iterates:
assumes "qmpt_factor proj M2 T2"
shows "AE x in M. ∀n. proj ((T^^n) x) = (T2^^n) (proj x)"
proof -
have "AE x in M. ∀n. proj (T ((T^^n) x)) = T2 (proj ((T^^n) x))"
by (rule T_AE_iterates[OF qmpt_factorE(2)[OF assms]])
moreover
{
fix x assume "∀n. proj (T ((T^^n) x)) = T2 (proj ((T^^n) x))"
then have H: "proj (T ((T^^n) x)) = T2 (proj ((T^^n) x))" for n by auto
have "proj ((T^^n) x) = (T2^^n) (proj x)" for n
apply (induction n) using H by auto
then have "∀n. proj ((T^^n) x) = (T2^^n) (proj x)" by auto
}
ultimately show ?thesis by fast
qed
(* Introduction rule: the three components of the definition suffice. *)
lemma (in qmpt) qmpt_factorI:
assumes "proj ∈ quasi_measure_preserving M M2"
"AE x in M. proj (T x) = T2 (proj x)"
"qmpt M2 T2"
shows "qmpt_factor proj M2 T2"
using assms unfolding qmpt_factor_def by auto
text ‹When there is a quasi-measure-preserving projection, then the quotient map
automatically is quasi-measure-preserving. The same goes for measure-preservation below.›
(* Stronger introduction rule: the quotient map T2 need not be assumed to be
   quasi-measure-preserving — this follows from the projection being
   quasi-measure-preserving and intertwining the dynamics a.e. *)
lemma (in qmpt) qmpt_factorI':
assumes "proj ∈ quasi_measure_preserving M M2"
"AE x in M. proj (T x) = T2 (proj x)"
"sigma_finite_measure M2"
"T2 ∈ measurable M2 M2"
shows "qmpt_factor proj M2 T2"
proof -
have [measurable]: "T ∈ measurable M M"
"T2 ∈ measurable M2 M2"
"proj ∈ measurable M M2"
using assms(4) quasi_measure_preservingE(1)[OF assms(1)] by auto
(* The key point: T2 preserves null sets. The null-set property is moved up
   through proj, through T, and down through proj again, discarding along the
   way the null set U where the intertwining relation may fail. *)
have *: "(T2 -` A ∩ space M2 ∈ null_sets M2) = (A ∈ null_sets M2)" if "A ∈ sets M2" for A
proof -
obtain U where U: "⋀x. x ∈ space M - U ⟹ proj (T x) = T2 (proj x)" "U ∈ null_sets M"
using AE_E3[OF assms(2)] by blast
then have [measurable]: "U ∈ sets M" by auto
have [measurable]: "A ∈ sets M2" using that by simp
have e1: "(T-`(proj-`A ∩ space M)) ∩ space M = T-`(proj-`A) ∩ space M"
using subset_eq by auto
have e2: "T-`(proj-`A) ∩ space M - U = proj-`(T2-`A) ∩ space M - U"
using U(1) by auto
have e3: "proj-`(T2-`A) ∩ space M = proj-`(T2-`A ∩ space M2) ∩ space M"
by (auto, meson ‹proj ∈ M →⇩M M2› measurable_space)
have "A ∈ null_sets M2 ⟷ proj-`A ∩ space M ∈ null_sets M"
using quasi_measure_preservingE(2)[OF assms(1)] by simp
also have "... ⟷ (T-`(proj-`A ∩ space M)) ∩ space M ∈ null_sets M"
by (rule quasi_measure_preservingE(2)[OF Tqm, symmetric], auto)
also have "... ⟷ T-`(proj-`A) ∩ space M ∈ null_sets M"
using e1 by simp
also have "... ⟷ T-`(proj-`A) ∩ space M - U ∈ null_sets M"
using emeasure_Diff_null_set[OF ‹U ∈ null_sets M›] unfolding null_sets_def by auto
also have "... ⟷ proj-`(T2-`A) ∩ space M - U ∈ null_sets M"
using e2 by simp
also have "... ⟷ proj-`(T2-`A) ∩ space M ∈ null_sets M"
using emeasure_Diff_null_set[OF ‹U ∈ null_sets M›] unfolding null_sets_def by auto
also have "... ⟷ proj-`(T2-`A ∩ space M2) ∩ space M ∈ null_sets M"
using e3 by simp
also have "... ⟷ T2-`A ∩ space M2 ∈ null_sets M2"
using quasi_measure_preservingE(2)[OF assms(1), of "T2-`A ∩ space M2"] by simp
finally show "T2-`A ∩ space M2 ∈ null_sets M2 ⟷ A ∈ null_sets M2"
by simp
qed
show ?thesis
by (intro qmpt_factorI qmpt_I) (auto simp add: assms *)
qed
(* Being a factor is transitive: a factor of a factor is a factor, with the
   composed projection. *)
lemma qmpt_factor_compose:
assumes "qmpt M1 T1"
"qmpt.qmpt_factor M1 T1 proj1 M2 T2"
"qmpt.qmpt_factor M2 T2 proj2 M3 T3"
shows "qmpt.qmpt_factor M1 T1 (proj2 o proj1) M3 T3"
proof -
(* Transport the a.e. intertwining relation on M2 back to M1 through proj1,
   and weaken it to an implication so the two relations can be combined. *)
have *: "proj1 ∈ quasi_measure_preserving M1 M2 ⟹ AE x in M2. proj2 (T2 x) = T3 (proj2 x)
⟹ (AE x in M1. proj1 (T1 x) = T2 (proj1 x) ⟶ proj2 (T2 (proj1 x)) = T3 (proj2 (proj1 x)))"
proof -
assume "AE y in M2. proj2 (T2 y) = T3 (proj2 y)"
"proj1 ∈ quasi_measure_preserving M1 M2"
then have "AE x in M1. proj2 (T2 (proj1 x)) = T3 (proj2 (proj1 x))"
using quasi_measure_preserving_AE by auto
moreover
{
fix x assume "proj2 (T2 (proj1 x)) = T3 (proj2 (proj1 x))"
then have "proj1 (T1 x) = T2 (proj1 x) ⟶ proj2 (T2 (proj1 x)) = T3 (proj2 (proj1 x))"
by auto
}
ultimately show "AE x in M1. proj1 (T1 x) = T2 (proj1 x) ⟶ proj2 (T2 (proj1 x)) = T3 (proj2 (proj1 x))"
by auto
qed
interpret I: qmpt M1 T1 using assms(1) by simp
interpret J: qmpt M2 T2 using I.qmpt_factorE(3)[OF assms(2)] by simp
show "I.qmpt_factor (proj2 o proj1) M3 T3"
apply (rule I.qmpt_factorI)
using I.qmpt_factorE[OF assms(2)] J.qmpt_factorE[OF assms(3)]
by (auto simp add: quasi_measure_preserving_comp *)
qed
text ‹The left shift on the natural numbers is a very natural dynamical system, which can be used to
model many systems as we see below. For invertible systems, one rather uses the shift on all the integers.›
(* Left shift on one-sided sequences: (nat_left_shift x) i = x (i+1). *)
definition nat_left_shift::"(nat ⇒ 'a) ⇒ (nat ⇒ 'a)"
where "nat_left_shift x = (λi. x (i+1))"
(* The shift is continuous for the product topology: each coordinate of the
   image depends continuously (in fact, as a projection) on the input. *)
lemma nat_left_shift_continuous [intro, continuous_intros]:
"continuous_on UNIV nat_left_shift"
by (rule continuous_on_coordinatewise_then_product, auto simp add: nat_left_shift_def)
(* Continuity gives Borel measurability of the shift. *)
lemma nat_left_shift_measurable [intro, measurable]:
"nat_left_shift ∈ measurable borel borel"
by (rule borel_measurable_continuous_onI, auto)
(* Left and right shifts on two-sided (integer-indexed) sequences; they are
   mutually inverse bijections (see int_shift_bij below). *)
definition int_left_shift::"(int ⇒ 'a) ⇒ (int ⇒ 'a)"
where "int_left_shift x = (λi. x (i+1))"
definition int_right_shift::"(int ⇒ 'a) ⇒ (int ⇒ 'a)"
where "int_right_shift x = (λi. x (i-1))"
(* Both integer shifts are continuous for the product topology. *)
lemma int_shift_continuous [intro, continuous_intros]:
"continuous_on UNIV int_left_shift"
"continuous_on UNIV int_right_shift"
apply (rule continuous_on_coordinatewise_then_product, auto simp add: int_left_shift_def)
apply (rule continuous_on_coordinatewise_then_product, auto simp add: int_right_shift_def)
done
(* Continuity gives Borel measurability of both integer shifts. *)
lemma int_shift_measurable [intro, measurable]:
"int_left_shift ∈ measurable borel borel"
"int_right_shift ∈ measurable borel borel"
by (rule borel_measurable_continuous_onI, auto)+
(* The integer shifts are bijections, each being the inverse of the other. *)
lemma int_shift_bij:
"bij int_left_shift" "inv int_left_shift = int_right_shift"
"bij int_right_shift" "inv int_right_shift = int_left_shift"
proof -
show "bij int_left_shift"
apply (rule bij_betw_byWitness[where ?f' = "λx. (λi. x (i-1))"]) unfolding int_left_shift_def by auto
show "inv int_left_shift = int_right_shift"
apply (rule inv_equality)
unfolding int_left_shift_def int_right_shift_def by auto
show "bij int_right_shift"
apply (rule bij_betw_byWitness[where ?f' = "λx. (λi. x (i+1))"]) unfolding int_right_shift_def by auto
show "inv int_right_shift = int_left_shift"
apply (rule inv_equality)
unfolding int_left_shift_def int_right_shift_def by auto
qed
(* The symbolic coding x ↦ (n ↦ f(T^n x)) makes the one-sided shift (with the
   pushforward measure) a factor of the original qmpt: applying T upstairs
   shifts the coded sequence to the left. *)
lemma (in qmpt) qmpt_factor_projection:
fixes f::"'a ⇒ ('b::second_countable_topology)"
assumes [measurable]: "f ∈ borel_measurable M"
and "sigma_finite_measure (distr M borel (λx n. f ((T ^^ n) x)))"
shows "qmpt_factor (λx. (λn. f ((T^^n)x))) (distr M borel (λx. (λn. f ((T^^n)x)))) nat_left_shift"
proof (rule qmpt_factorI')
have * [measurable]: "(λx. (λn. f ((T^^n)x))) ∈ borel_measurable M"
using measurable_coordinatewise_then_product by measurable
show "(λx n. f ((T ^^ n) x)) ∈ quasi_measure_preserving M (distr M borel (λx n. f ((T ^^ n) x)))"
by (rule measure_preserving_is_quasi_measure_preserving[OF measure_preserving_distr'[OF *]])
have "(λn. f ((T ^^ n) (T x))) = nat_left_shift (λn. f ((T ^^ n) x))" for x
unfolding nat_left_shift_def by (auto simp add: funpow_swap1)
then show "AE x in M. (λn. f ((T ^^ n) (T x))) = nat_left_shift (λn. f ((T ^^ n) x))"
by simp
qed (auto simp add: assms(2))
text ‹Let us now define factors of measure-preserving transformations, in the same way
as above.›
(* A factor of an mpt: like qmpt_factor, but the projection is required to be
   measure-preserving and the target system to be an mpt. *)
definition (in mpt) mpt_factor::"('a ⇒ 'b) ⇒ ('b measure) ⇒ ('b ⇒ 'b) ⇒ bool"
where "mpt_factor proj M2 T2 =
((proj ∈ measure_preserving M M2) ∧ (AE x in M. proj (T x) = T2 (proj x)) ∧ mpt M2 T2)"
(* An mpt factor is in particular a qmpt factor. *)
lemma (in mpt) mpt_factor_is_qmpt_factor:
assumes "mpt_factor proj M2 T2"
shows "qmpt_factor proj M2 T2"
using assms unfolding mpt_factor_def qmpt_factor_def
by (simp add: measure_preserving_is_quasi_measure_preserving mpt_def)
(* Elimination rule: unpack the three components of the factor definition. *)
lemma (in mpt) mpt_factorE:
assumes "mpt_factor proj M2 T2"
shows "proj ∈ measure_preserving M M2"
"AE x in M. proj (T x) = T2 (proj x)"
"mpt M2 T2"
using assms unfolding mpt_factor_def by auto
(* Introduction rule: the three components of the definition suffice. *)
lemma (in mpt) mpt_factorI:
assumes "proj ∈ measure_preserving M M2"
"AE x in M. proj (T x) = T2 (proj x)"
"mpt M2 T2"
shows "mpt_factor proj M2 T2"
using assms unfolding mpt_factor_def by auto
text ‹When there is a measure-preserving projection commuting with the dynamics, and the
dynamics above preserves the measure, then so does the dynamics below.›
(* Stronger introduction rule: the quotient map T2 need not be assumed to be
   measure-preserving — this follows from the projection being
   measure-preserving and intertwining the dynamics a.e. *)
lemma (in mpt) mpt_factorI':
assumes "proj ∈ measure_preserving M M2"
"AE x in M. proj (T x) = T2 (proj x)"
"sigma_finite_measure M2"
"T2 ∈ measurable M2 M2"
shows "mpt_factor proj M2 T2"
proof -
have [measurable]: "T ∈ measurable M M"
"T2 ∈ measurable M2 M2"
"proj ∈ measurable M M2"
using assms(4) measure_preservingE(1)[OF assms(1)] by auto
(* The key point: T2 preserves emeasure. The computation is transported up
   through proj, through T, and down through proj again, discarding along the
   way the null set U where the intertwining relation may fail. *)
have *: "emeasure M2 (T2 -` A ∩ space M2) = emeasure M2 A" if "A ∈ sets M2" for A
proof -
obtain U where U: "⋀x. x ∈ space M - U ⟹ proj (T x) = T2 (proj x)" "U ∈ null_sets M"
using AE_E3[OF assms(2)] by blast
then have [measurable]: "U ∈ sets M" by auto
have [measurable]: "A ∈ sets M2" using that by simp
have e1: "(T-`(proj-`A ∩ space M)) ∩ space M = T-`(proj-`A) ∩ space M"
using subset_eq by auto
have e2: "T-`(proj-`A) ∩ space M - U = proj-`(T2-`A) ∩ space M - U"
using U(1) by auto
have e3: "proj-`(T2-`A) ∩ space M = proj-`(T2-`A ∩ space M2) ∩ space M"
by (auto, meson ‹proj ∈ M →⇩M M2› measurable_space)
have "emeasure M2 A = emeasure M (proj-`A ∩ space M)"
using measure_preservingE(2)[OF assms(1)] by simp
also have "... = emeasure M (T-`(proj-`A ∩ space M) ∩ space M)"
by (rule measure_preservingE(2)[OF Tm, symmetric], auto)
also have "... = emeasure M (T-`(proj-`A) ∩ space M)"
using e1 by simp
also have "... = emeasure M (T-`(proj-`A) ∩ space M - U)"
using emeasure_Diff_null_set[OF ‹U ∈ null_sets M›] by auto
also have "... = emeasure M (proj-`(T2-`A) ∩ space M - U)"
using e2 by simp
also have "... = emeasure M (proj-`(T2-`A) ∩ space M)"
using emeasure_Diff_null_set[OF ‹U ∈ null_sets M›] by auto
also have "... = emeasure M (proj-`(T2-`A ∩ space M2) ∩ space M)"
using e3 by simp
also have "... = emeasure M2 (T2-`A ∩ space M2)"
using measure_preservingE(2)[OF assms(1), of "T2-`A ∩ space M2"] by simp
finally show "emeasure M2 (T2-`A ∩ space M2) = emeasure M2 A"
by simp
qed
show ?thesis
by (intro mpt_factorI mpt_I) (auto simp add: assms *)
qed
(* On a finite measure space, sigma-finiteness of M2 is automatic (it is the
   pushforward of a finite measure), so the assumption can be dropped. *)
lemma (in fmpt) mpt_factorI'':
assumes "proj ∈ measure_preserving M M2"
"AE x in M. proj (T x) = T2 (proj x)"
"T2 ∈ measurable M2 M2"
shows "mpt_factor proj M2 T2"
apply (rule mpt_factorI', auto simp add: assms)
using measure_preserving_finite_measure[OF assms(1)] finite_measure_axioms finite_measure_def by blast
(* An mpt factor of an fmpt is itself an fmpt: finiteness of the measure is
   transported through the measure-preserving projection. *)
lemma (in fmpt) fmpt_factor:
assumes "mpt_factor proj M2 T2"
shows "fmpt M2 T2"
unfolding fmpt_def using mpt_factorE(3)[OF assms]
measure_preserving_finite_measure[OF mpt_factorE(1)[OF assms]] finite_measure_axioms by auto
(* Likewise, an mpt factor of a pmpt is a pmpt (total mass 1 is transported). *)
lemma (in pmpt) pmpt_factor:
assumes "mpt_factor proj M2 T2"
shows "pmpt M2 T2"
unfolding pmpt_def using fmpt_factor[OF assms]
measure_preserving_prob_space[OF mpt_factorE(1)[OF assms]] prob_space_axioms by auto
(* Being an mpt factor is transitive, with the composed projection. *)
lemma mpt_factor_compose:
assumes "mpt M1 T1"
"mpt.mpt_factor M1 T1 proj1 M2 T2"
"mpt.mpt_factor M2 T2 proj2 M3 T3"
shows "mpt.mpt_factor M1 T1 (proj2 o proj1) M3 T3"
proof -
(* Transport the a.e. intertwining relation on M2 back to M1 through proj1,
   and weaken it to an implication so the two relations can be combined. *)
have *: "proj1 ∈ measure_preserving M1 M2 ⟹ AE x in M2. proj2 (T2 x) = T3 (proj2 x) ⟹
(AE x in M1. proj1 (T1 x) = T2 (proj1 x) ⟶ proj2 (T2 (proj1 x)) = T3 (proj2 (proj1 x)))"
proof -
assume "AE y in M2. proj2 (T2 y) = T3 (proj2 y)"
"proj1 ∈ measure_preserving M1 M2"
then have "AE x in M1. proj2 (T2 (proj1 x)) = T3 (proj2 (proj1 x))"
using quasi_measure_preserving_AE measure_preserving_is_quasi_measure_preserving by blast
moreover
{
fix x assume "proj2 (T2 (proj1 x)) = T3 (proj2 (proj1 x))"
then have "proj1 (T1 x) = T2 (proj1 x) ⟶ proj2 (T2 (proj1 x)) = T3 (proj2 (proj1 x))"
by auto
}
ultimately show "AE x in M1. proj1 (T1 x) = T2 (proj1 x) ⟶ proj2 (T2 (proj1 x)) = T3 (proj2 (proj1 x))"
by auto
qed
interpret I: mpt M1 T1 using assms(1) by simp
interpret J: mpt M2 T2 using I.mpt_factorE(3)[OF assms(2)] by simp
show "I.mpt_factor (proj2 o proj1) M3 T3"
apply (rule I.mpt_factorI)
using I.mpt_factorE[OF assms(2)] J.mpt_factorE[OF assms(3)]
by (auto simp add: measure_preserving_comp *)
qed
text ‹Left shifts are naturally factors of finite measure preserving transformations.›
(* The symbolic coding x ↦ (n ↦ f(T^n x)) makes the one-sided shift (with the
   pushforward measure) an mpt factor of the original mpt. *)
lemma (in mpt) mpt_factor_projection:
fixes f::"'a ⇒ ('b::second_countable_topology)"
assumes [measurable]: "f ∈ borel_measurable M"
and "sigma_finite_measure (distr M borel (λx n. f ((T ^^ n) x)))"
shows "mpt_factor (λx. (λn. f ((T^^n)x))) (distr M borel (λx. (λn. f ((T^^n)x)))) nat_left_shift"
proof (rule mpt_factorI')
have * [measurable]: "(λx. (λn. f ((T^^n)x))) ∈ borel_measurable M"
using measurable_coordinatewise_then_product by measurable
show "(λx n. f ((T ^^ n) x)) ∈ measure_preserving M (distr M borel (λx n. f ((T ^^ n) x)))"
by (rule measure_preserving_distr'[OF *])
have "(λn. f ((T ^^ n) (T x))) = nat_left_shift (λn. f ((T ^^ n) x))" for x
unfolding nat_left_shift_def by (auto simp add: funpow_swap1)
then show "AE x in M. (λn. f ((T ^^ n) (T x))) = nat_left_shift (λn. f ((T ^^ n) x))"
by simp
qed (auto simp add: assms(2))
(* On a finite measure space, the sigma-finiteness assumption of
   mpt_factor_projection holds automatically: the pushforward of a finite
   measure is finite, hence sigma-finite. *)
lemma (in fmpt) fmpt_factor_projection:
fixes f::"'a ⇒ ('b::second_countable_topology)"
assumes [measurable]: "f ∈ borel_measurable M"
shows "mpt_factor (λx. (λn. f ((T^^n)x))) (distr M borel (λx. (λn. f ((T^^n)x)))) nat_left_shift"
proof (rule mpt_factor_projection, simp add: assms)
have * [measurable]: "(λx. (λn. f ((T^^n)x))) ∈ borel_measurable M"
using measurable_coordinatewise_then_product by measurable
have **: "(λx n. f ((T ^^ n) x)) ∈ measure_preserving M (distr M borel (λx n. f ((T ^^ n) x)))"
by (rule measure_preserving_distr'[OF *])
have a: "finite_measure (distr M borel (λx n. f ((T ^^ n) x)))"
using measure_preserving_finite_measure[OF **] finite_measure_axioms by blast
then show "sigma_finite_measure (distr M borel (λx n. f ((T ^^ n) x)))"
by (simp add: finite_measure_def)
qed
subsection ‹Natural extension›
text ‹Many probability preserving dynamical systems are not invertible, while invertibility is
often useful in proofs. The notion of natural extension is a solution to this problem: it shows that
(essentially) any system has an extension which is invertible.
This extension is constructed by considering the space of orbits indexed by the integers, with
the left shift acting on it. If one considers the orbits starting from time $-N$
(for some fixed $N$), then there is a natural measure on this space: such an orbit is
parameterized by its starting point at time $-N$, hence one may use the original measure on this
point. The invariance of the measure ensures that these measures are compatible with each other.
Their projective limit (when $N$ tends to infinity) is thus an invariant measure on the bilateral
shift. The shift with this measure is the desired extension of the original system.
There is a difficulty in the above argument: one needs to make sure that the projective limit of
a system of compatible measures is well defined. This requires some topological conditions on the
measures (they should be inner regular, i.e., the measure of any set should be approximated from
below by compact subsets -- this is automatic on polish spaces). The existence of projective limits
is proved in \verb+Projective_Limits.thy+ under the (sufficient) polish condition. We use this
theory, so we need the underlying space to be a polish space and the measure to be a Borel
measure. This is almost completely satisfactory.
What is not completely satisfactory is that the completion of a Borel measure on a polish space
(i.e., we add all subsets of sets of measure $0$ into the sigma algebra) does not fit into this
setting, while this is an important framework in dynamical systems. It would readily follow
once \verb+Projective_Limits.thy+ is extended to the more general inner regularity setting
(the completion of a Borel measure on a polish space is always inner regular).
›
(* A probability measure preserving system on a polish space, with a Borel measure.
   The polish/Borel restriction is what makes the projective-limit construction of
   the natural extension below applicable (via Projective_Limits.thy). *)
locale polish_pmpt = pmpt "M::('a::polish_space measure)" T for M T
+ assumes M_eq_borel: "sets M = sets borel"
begin
(* The dynamics of the natural extension: the left shift on two-sided orbits int => 'a. *)
definition natural_extension_map
where "natural_extension_map = (int_left_shift::((int ⇒ 'a) ⇒ (int ⇒ 'a)))"
(* The invariant measure of the natural extension: the projective limit of the
   finite-dimensional distributions.  An orbit restricted to a finite index set I is
   parameterized by its value at time Min I, hence the pushforward of M under
   x |-> (i in I |-> T^(i - Min I) x). *)
definition natural_extension_measure::"(int ⇒ 'a) measure"
where "natural_extension_measure =
projective_family.lim UNIV (λI. distr M (Π⇩M i∈I. borel) (λx. (λi∈I. (T^^(nat(i- Min I))) x))) (λi. borel)"
(* The factor map from the natural extension back to the original system:
   evaluate the orbit at time 0. *)
definition natural_extension_proj::"(int ⇒ 'a) ⇒ 'a"
where "natural_extension_proj = (λx. x 0)"
(* Main theorem: the natural extension is a probability measure preserving system,
   it is invertible, and the original system (M, T) is a factor of it via evaluation
   at time 0.  The proof proceeds in three stages:
   (1) check that the finite-dimensional distributions P form a projective family,
       so that their polish projective limit lim exists;
   (2) show shift-invariance of lim by checking it on the generating cylinder sets E;
   (3) identify (M, T) as a factor through the time-0 projection. *)
theorem natural_extension:
"pmpt natural_extension_measure natural_extension_map"
"qmpt.invertible_qmpt natural_extension_measure natural_extension_map"
"mpt.mpt_factor natural_extension_measure natural_extension_map natural_extension_proj M T"
proof -
(* P I: law of the orbit restricted to the finite index set I, parameterized by the
   starting point at time Min I *)
define P::"int set ⇒ (int ⇒ 'a) measure" where
"P = (λI. distr M (Π⇩M i∈I. borel) (λx. (λi∈I. (T^^(nat(i- Min I))) x)))"
have [measurable]: "(T^^n) ∈ measurable M borel" for n
using M_eq_borel by auto
(* Stage (1): projectivity.  Restricting an H-indexed orbit to J <= H is the same as
   starting the J-indexed orbit from T^(Min J - Min H) of the base point; invariance
   of M under T then gives compatibility of P J with P H. *)
interpret polish_projective UNIV P
unfolding polish_projective_def projective_family_def
proof (auto)
show "prob_space (P I)" if "finite I" for I unfolding P_def by (rule prob_space_distr, auto)
fix J H::"int set" assume "J ⊆ H" "finite H"
then have "H ∩ J = J" by blast
have "((λf. restrict f J) o (λx. (λi∈H. (T^^(nat(i- Min H))) x))) x
= ((λx. (λi∈J. (T^^(nat(i- Min J))) x)) o (T^^(nat(Min J - Min H)))) x" for x
proof -
(* key index computation: Min H <= Min J <= i for i in J, so the nat subtractions add up *)
have "nat(i- Min H) = nat(i- Min J) + nat(Min J - Min H)" if "i ∈ J" for i
proof -
have "finite J" using ‹J ⊆ H› ‹finite H› finite_subset by auto
then have "Min J ∈ J" using Min_in ‹i ∈ J› by auto
then have "Min J ∈ H" using ‹J ⊆ H› by blast
then have "Min H ≤ Min J" using Min.coboundedI[OF ‹finite H›] by auto
moreover have "Min J ≤ i" using Min.coboundedI[OF ‹finite J› ‹i ∈ J›] by auto
ultimately show ?thesis by auto
qed
then show ?thesis
unfolding comp_def by (auto simp add: ‹H ∩ J = J› funpow_add)
qed
then have *: "(λf. restrict f J) o (λx. (λi∈H. (T^^(nat(i- Min H))) x))
= (λx. (λi∈J. (T^^(nat(i- Min J))) x)) o (T^^(nat(Min J - Min H)))"
by auto
have "distr (P H) (Pi⇩M J (λ_. borel)) (λf. restrict f J)
= distr M (Π⇩M i∈J. borel) ((λf. restrict f J) o (λx. (λi∈H. (T^^(nat(i- Min H))) x)))"
unfolding P_def by (rule distr_distr, auto simp add: ‹J ⊆ H› measurable_restrict_subset)
also have "... = distr M (Π⇩M i∈J. borel) ((λx. (λi∈J. (T^^(nat(i- Min J))) x)) o (T^^(nat(Min J - Min H))))"
using * by auto
also have "... = distr (distr M M (T^^(nat(Min J - Min H)))) (Π⇩M i∈J. borel) (λx. (λi∈J. (T^^(nat(i- Min J))) x))"
by (rule distr_distr[symmetric], auto)
(* here the measure invariance of T is used: distr M M (T^n) = M *)
also have "... = distr M (Π⇩M i∈J. borel) (λx. (λi∈J. (T^^(nat(i- Min J))) x))"
using measure_preserving_distr[OF Tn_measure_preserving] by auto
also have "... = P J"
unfolding P_def by auto
finally show "P J = distr (P H) (Pi⇩M J (λ_. borel)) (λf. restrict f J)"
by simp
qed
(* the product sigma-algebra on int => 'a coincides with the Borel one, allowing us
   to use the Borel-measurability of the shifts *)
have S: "sets (Pi⇩M UNIV (λ_. borel)) = sets (borel::(int ⇒ 'a) measure)"
by (rule sets_PiM_equal_borel)
have "natural_extension_measure = lim"
unfolding natural_extension_measure_def P_def by simp
have "measurable lim lim = measurable borel borel"
by (rule measurable_cong_sets, auto simp add: S)
then have [measurable]: "int_left_shift ∈ measurable lim lim" "int_right_shift ∈ measurable lim lim"
using int_shift_measurable by fast+
have [simp]: "space lim = UNIV"
unfolding space_lim space_PiM space_borel by auto
(* Stage (2): lim is a shift-invariant probability measure *)
show "pmpt natural_extension_measure natural_extension_map"
proof (rule pmpt_I)
show "prob_space natural_extension_measure"
unfolding ‹natural_extension_measure = lim› by (simp add: P.prob_space_axioms)
show "natural_extension_map ∈ measurable natural_extension_measure natural_extension_measure"
unfolding natural_extension_map_def ‹natural_extension_measure = lim› by simp
(* E: the cylinder sets, an intersection-stable generator of the product sigma-algebra *)
define E where "E = {(Π⇩E i∈UNIV. X i) |X::(int ⇒ 'a set). (∀i. X i ∈ sets borel) ∧ finite {i. X i ≠ UNIV}}"
have "lim = distr lim lim int_left_shift"
proof (rule measure_eqI_generator_eq[of E UNIV, where ?A = "λ_. UNIV"])
show "sets lim = sigma_sets UNIV E"
unfolding E_def using sets_PiM_finite[of "UNIV::int set" "λ_. (borel::'a measure)"]
by (simp add: PiE_def)
moreover have "sets (distr lim lim int_left_shift) = sets lim" by auto
ultimately show "sets (distr lim lim int_left_shift) = sigma_sets UNIV E" by simp
show "emeasure lim UNIV ≠ ∞" by (simp add: P.prob_space_axioms)
have "UNIV = (Π⇩E i∈(UNIV::int set). (UNIV::'a set))" by (simp add: PiE_def)
moreover have "... ∈ E" unfolding E_def by auto
ultimately show "range (λ(i::nat). (UNIV::(int ⇒ 'a) set)) ⊆ E"
by auto
(* cylinders are stable under intersection, taking coordinatewise intersections *)
show "Int_stable E"
proof (rule Int_stableI)
fix U V assume "U ∈ E" "V ∈ E"
then obtain X Y where H: "U = (Π⇩E i∈UNIV. X i)" "⋀i. X i ∈ sets borel" "finite {i. X i ≠ UNIV}"
"V = (Π⇩E i∈UNIV. Y i)" "⋀i. Y i ∈ sets borel" "finite {i. Y i ≠ UNIV}"
unfolding E_def by blast
define Z where "Z = (λi. X i ∩ Y i)"
have "{i. Z i ≠ UNIV} ⊆ {i. X i ≠ UNIV} ∪ {i. Y i ≠ UNIV}"
unfolding Z_def by auto
then have "finite {i. Z i ≠ UNIV}"
using H(3) H(6) finite_subset by auto
moreover have "U ∩ V = (Π⇩E i∈UNIV. Z i)"
unfolding Z_def using H(1) H(4) by auto
moreover have "⋀i. Z i ∈ sets borel"
unfolding Z_def using H(2) H(5) by auto
ultimately show "U ∩ V ∈ E"
unfolding E_def by auto
qed
(* shift-invariance of lim on a cylinder U, by comparing the finite-dimensional
   laws of U and of its shift preimage *)
fix U assume "U ∈ E"
then obtain X where H [measurable]: "U = (Π⇩E i∈UNIV. X i)" "⋀i. X i ∈ sets borel" "finite {i. X i ≠ UNIV}"
unfolding E_def by blast
define I where "I = {i. X i ≠ UNIV}"
have [simp]: "finite I" unfolding I_def using H(3) by auto
have [measurable]: "(Π⇩E i∈I. X i) ∈ sets (Pi⇩M I (λi. borel))" using H(2) by simp
have *: "U = emb UNIV I (Π⇩E i∈I. X i)"
unfolding H(1) I_def prod_emb_def space_borel apply (auto simp add: PiE_def)
by (metis (mono_tags, lifting) PiE UNIV_I mem_Collect_eq restrict_Pi_cancel)
have "emeasure lim U = emeasure lim (int_left_shift-`U)"
proof (cases "I = {}")
case True
then have "U = UNIV" unfolding H(1) I_def by auto
then show ?thesis by auto
next
case False
have "emeasure lim U = emeasure (P I) (Π⇩E i∈I. X i)"
unfolding * by (rule emeasure_lim_emb, auto)
also have "... = emeasure M (((λx. (λi∈I. (T^^(nat(i- Min I))) x)))-`(Π⇩E i∈I. X i) ∩ space M)"
unfolding P_def by (rule emeasure_distr, auto)
finally have A: "emeasure lim U = emeasure M (((λx. (λi∈I. (T^^(nat(i- Min I))) x)))-`(Π⇩E i∈I. X i) ∩ space M)"
by simp
(* the shift preimage of the cylinder over X is the cylinder over i |-> X (i-1) *)
have i: "int_left_shift-`U = (Π⇩E i∈UNIV. X (i-1))"
unfolding H(1) apply (auto simp add: int_left_shift_def PiE_def)
by (metis PiE UNIV_I diff_add_cancel, metis Pi_mem add.commute add_diff_cancel_left' iso_tuple_UNIV_I)
define Im where "Im = {i. X (i-1) ≠ UNIV}"
have "Im = (λi. i+1)`I"
unfolding I_def Im_def using image_iff by (auto, fastforce)
then have [simp]: "finite Im" by auto
have *: "int_left_shift-`U = emb UNIV Im (Π⇩E i∈Im. X (i-1))"
unfolding i Im_def prod_emb_def space_borel apply (auto simp add: PiE_def)
by (metis (mono_tags, lifting) PiE UNIV_I mem_Collect_eq restrict_Pi_cancel)
have "emeasure lim (int_left_shift-`U) = emeasure (P Im) (Π⇩E i∈Im. X (i-1))"
unfolding * by (rule emeasure_lim_emb, auto)
also have "... = emeasure M (((λx. (λi∈Im. (T^^(nat(i- Min Im))) x)))-`(Π⇩E i∈Im. X (i-1)) ∩ space M)"
unfolding P_def by (rule emeasure_distr, auto)
finally have B: "emeasure lim (int_left_shift-`U) = emeasure M (((λx. (λi∈Im. (T^^(nat(i- Min Im))) x)))-`(Π⇩E i∈Im. X (i-1)) ∩ space M)"
by simp
(* shifting the index set shifts its minimum, so the two base-point preimages coincide *)
have "Min Im = Min I + 1" unfolding ‹Im = (λi. i+1)`I›
by (rule mono_Min_commute[symmetric], auto simp add: False monoI)
have "((λx. (λi∈Im. (T^^(nat(i- Min Im))) x)))-`(Π⇩E i∈Im. X (i-1)) = ((λx. (λi∈I. (T^^(nat(i- Min I))) x)))-`(Π⇩E i∈I. X i)"
unfolding ‹Min Im = Min I + 1› unfolding ‹Im = (λi. i+1)`I› by (auto simp add: Pi_iff)
then show "emeasure lim U = emeasure lim (int_left_shift -` U)" using A B by auto
qed
also have "... = emeasure lim (int_left_shift-`U ∩ space lim)"
unfolding ‹space lim = UNIV› by auto
also have "... = emeasure (distr lim lim int_left_shift) U"
apply (rule emeasure_distr[symmetric], auto) using * by auto
finally show "emeasure lim U = emeasure (distr lim lim int_left_shift) U"
by simp
qed (auto)
(* invariance on the generator extends to all measurable sets *)
fix U assume "U ∈ sets natural_extension_measure"
then have [measurable]: "U ∈ sets lim" using ‹natural_extension_measure = lim› by simp
have "emeasure natural_extension_measure (natural_extension_map -` U ∩ space natural_extension_measure)
= emeasure lim (int_left_shift-`U ∩ space lim)"
unfolding ‹natural_extension_measure = lim› natural_extension_map_def by simp
also have "... = emeasure (distr lim lim int_left_shift) U"
apply (rule emeasure_distr[symmetric], auto) using ‹U ∈ P.events› by auto
also have "... = emeasure lim U"
using ‹lim = distr lim lim int_left_shift› by simp
also have "... = emeasure natural_extension_measure U"
using ‹natural_extension_measure = lim› by simp
finally show "emeasure natural_extension_measure (natural_extension_map -` U ∩ space natural_extension_measure)
= emeasure natural_extension_measure U"
by simp
qed
then interpret I: pmpt natural_extension_measure natural_extension_map by simp
(* invertibility: the left shift on int => 'a is a bijection *)
show "I.invertible_qmpt"
unfolding I.invertible_qmpt_def unfolding natural_extension_map_def ‹natural_extension_measure = lim›
by (auto simp add: int_shift_bij)
(* Stage (3): (M, T) is a factor through evaluation at time 0 *)
show "I.mpt_factor natural_extension_proj M T" unfolding I.mpt_factor_def
proof (auto)
show "mpt M T" by (simp add: mpt_axioms)
show "natural_extension_proj ∈ measure_preserving natural_extension_measure M"
unfolding ‹natural_extension_measure = lim›
proof
have *: "measurable lim M = measurable borel borel"
apply (rule measurable_cong_sets) using sets_PiM_equal_borel M_eq_borel by auto
show "natural_extension_proj ∈ measurable lim M"
unfolding * natural_extension_proj_def by auto
(* the time-0 marginal of lim is P {0} = M, hence the projection preserves measure *)
fix U assume [measurable]: "U ∈ sets M"
have *: "(((λx. λi∈{0}. (T ^^ nat (i - Min {0})) x))-` ({0} →⇩E U) ∩ space M) = U"
using sets.sets_into_space[OF ‹U ∈ sets M›] by auto
have "natural_extension_proj-`U ∩ space lim = emb UNIV {0} (Π⇩E i∈{0}. U)"
unfolding ‹space lim = UNIV› natural_extension_proj_def prod_emb_def by (auto simp add: PiE_iff)
then have "emeasure lim (natural_extension_proj-`U ∩ space lim) = emeasure lim (emb UNIV {0} (Π⇩E i∈{0}. U))"
by simp
also have "... = emeasure (P {0}) (Π⇩E i∈{0}. U)"
apply (rule emeasure_lim_emb, auto) using ‹U ∈ sets M› M_eq_borel by auto
also have "... = emeasure M (((λx. λi∈{0}. (T ^^ nat (i - Min {0})) x))-` ({0} →⇩E U) ∩ space M)"
unfolding P_def apply (rule emeasure_distr) using ‹U ∈ sets M› M_eq_borel by auto
also have "... = emeasure M U"
using * by simp
finally show "emeasure lim (natural_extension_proj-`U ∩ space lim) = emeasure M U" by simp
qed
(* intertwining: almost every orbit satisfies x 1 = T (x 0), as the set of such
   orbits is a cylinder of full measure *)
define U::"(int ⇒ 'a) set" where "U = {x ∈ space (Pi⇩M {0, 1} (λi. borel)). x 1 = T (x 0)}"
have *: "((λx. λi∈{0, 1}. (T ^^ nat (i - Min {0, 1})) x))-` U ∩ space M = space M"
unfolding U_def space_PiM space_borel by auto
have [measurable]: "T ∈ measurable borel borel"
using M_eq_borel by auto
have [measurable]: "U ∈ sets (Pi⇩M {0, 1} (λi. borel))"
unfolding U_def by (rule measurable_equality_set, auto)
have "emeasure natural_extension_measure (emb UNIV {0, 1} U) = emeasure (P {0, 1}) U"
unfolding ‹natural_extension_measure = lim› by (rule emeasure_lim_emb, auto)
also have "... = emeasure M (((λx. λi∈{0, 1}. (T ^^ nat (i - Min {0, 1})) x))-` U ∩ space M)"
unfolding P_def by (rule emeasure_distr, auto)
also have "... = emeasure M (space M)"
using * by simp
also have "... = 1" by (simp add: emeasure_space_1)
finally have *: "emeasure natural_extension_measure (emb UNIV {0, 1} U) = 1" by simp
have "AE x in natural_extension_measure. x ∈ emb UNIV {0, 1} U"
apply (rule I.AE_prob_1) using * by (simp add: I.emeasure_eq_measure)
moreover
{
fix x assume "x ∈ emb UNIV {0, 1} U"
then have "x 1 = T (x 0)" unfolding prod_emb_def U_def by auto
then have "natural_extension_proj (natural_extension_map x) = T (natural_extension_proj x)"
unfolding natural_extension_proj_def natural_extension_map_def int_left_shift_def by auto
}
ultimately show "AE x in natural_extension_measure.
natural_extension_proj (natural_extension_map x) = T (natural_extension_proj x)"
by auto
qed
qed
end
end
Theory Recurrence
section ‹Conservativity, recurrence›
theory Recurrence
imports Measure_Preserving_Transformations
begin
text ‹A dynamical system is conservative if almost every point comes back close to its starting point.
This is always the case if the measure is finite, not when it is infinite (think of the translation
on $\mathbb{Z}$). In conservative systems, an important construction is the induced map: the first return
map to a set of finite measure. It is measure-preserving and conservative if the original system is.
This makes it possible to reduce statements about general conservative systems in infinite measure
to statements about systems in finite measure, and as such is extremely useful.›
subsection ‹Definition of conservativity›
(* A quasi measure preserving system is conservative when every set of positive measure
   meets one of its iterated preimages in positive measure, i.e., a positive-measure
   portion of any such set eventually returns to it. *)
locale conservative = qmpt +
assumes conservative: "⋀A. A ∈ sets M ⟹ emeasure M A > 0 ⟹ ∃n>0. emeasure M ((T^^n)-`A ∩ A) >0"
(* Introduction rule: a qmpt satisfying the recurrence condition is conservative. *)
lemma conservativeI:
assumes "qmpt M T"
"⋀A. A ∈ sets M ⟹ emeasure M A > 0 ⟹ ∃n>0. emeasure M ((T^^n)-`A ∩ A) >0"
shows "conservative M T"
unfolding conservative_def conservative_axioms_def using assms by auto
text ‹To prove conservativity, it is in fact sufficient to show that the preimages of a set
of positive measure intersect it, without any measure control. Indeed, in a non-conservative
system, one can construct a set which does not satisfy this property.›
(* Stronger introduction rule: mere non-emptiness of (T^n)-`A ∩ A for sets of positive
   measure suffices.  Proof by contradiction: if all such intersections for A were null,
   remove from A the union C of all of them; the remainder A2 is disjoint from all its
   preimages, hence null by hypothesis, forcing A itself to be null. *)
lemma conservativeI2:
assumes "qmpt M T"
"⋀A. A ∈ sets M ⟹ emeasure M A > 0 ⟹ ∃n>0. (T^^n)-`A ∩ A ≠ {}"
shows "conservative M T"
unfolding conservative_def conservative_axioms_def
proof (auto simp add: assms)
interpret qmpt M T using assms by auto
fix A
assume A_meas [measurable]: "A ∈ sets M" and "emeasure M A > 0"
show "∃n>0. 0 < emeasure M ((T ^^ n) -` A ∩ A)"
proof (rule ccontr)
assume "¬ (∃n>0. 0 < emeasure M ((T ^^ n) -` A ∩ A))"
then have meas_0: "emeasure M ((T ^^ n) -` A ∩ A) = 0" if "n>0" for n
by (metis zero_less_iff_neq_zero that)
(* C collects all returning points of A; by assumption it is null *)
define C where "C = (⋃n. (T^^(Suc n))-`A ∩ A)"
have C_meas [measurable]: "C ∈ sets M" unfolding C_def by measurable
have "emeasure M C = 0" unfolding C_def
by (intro emeasure_UN_eq_0[of M, of "λn. (T^^(Suc n))-`A ∩ A", OF meas_0], auto)
(* A2 = non-returning points of A: it never meets its own preimages *)
define A2 where "A2 = A-C"
then have A2_meas [measurable]: "A2 ∈ sets M" by simp
have "¬(∃n>0. (T^^n)-`A2 ∩ A2 ≠ {})"
proof (rule ccontr, simp)
assume "∃n>0. (T^^n)-`A2 ∩ A2 ≠ {}"
then obtain n where n: "n > 0" "(T^^n)-`A2 ∩ A2 ≠ {}" by auto
define m where "m = n-1"
have "(T^^(m+1))-`A2 ∩ A2 ≠ {}" unfolding m_def using n by auto
then show False using C_def A2_def by auto
qed
(* hence A2 is null by the hypothesis, so A ⊆ C ∪ A2 is null: contradiction *)
then have "emeasure M A2 = 0" using assms(2)[OF A2_meas] by (meson zero_less_iff_neq_zero)
then have "emeasure M (C ∪ A2) = 0" using ‹emeasure M C = 0› by (simp add: emeasure_Un_null_set null_setsI)
moreover have "A ⊆ C ∪ A2" unfolding A2_def by auto
ultimately have "emeasure M A = 0" by (meson A2_meas C_meas emeasure_eq_0 sets.Un)
then show False using ‹emeasure M A > 0› by auto
qed
qed
text ‹There is also a dual formulation, saying that conservativity follows from the fact
that a set disjoint from all its preimages has to be null.›
(* Contrapositive introduction rule: if every set disjoint from all its preimages is
   null, the system is conservative.  Reduces directly to conservativeI2. *)
lemma conservativeI3:
assumes "qmpt M T"
"⋀A. A ∈ sets M ⟹ (∀n>0. (T^^n)-`A ∩ A = {}) ⟹ A ∈ null_sets M"
shows "conservative M T"
proof (rule conservativeI2[OF assms(1)])
fix A assume "A ∈ sets M" "0 < emeasure M A"
then have "¬(A ∈ null_sets M)" unfolding null_sets_def by auto
then show "∃n>0. (T ^^ n) -` A ∩ A ≠ {}"
using assms(2)[OF ‹A ∈ sets M›] by auto
qed
text ‹The inverse of a conservative map is still conservative›
(* Conservativity is inherited by the inverse of an invertible system: a recurrence
   time n for A under T is transported to one under Tinv by applying (Tinv^n)-`
   and using (T^n) o (Tinv^n) = id. *)
lemma (in conservative) conservative_Tinv:
assumes "invertible_qmpt"
shows "conservative M Tinv"
proof (rule conservativeI2)
show "qmpt M Tinv" using Tinv_qmpt[OF assms].
have "bij T" using assms unfolding invertible_qmpt_def by auto
fix A assume [measurable]: "A ∈ sets M" and "emeasure M A > 0"
then obtain n where *: "n>0" "emeasure M ((T^^n)-`A ∩ A) > 0"
using conservative[OF ‹A ∈ sets M› ‹emeasure M A > 0›] by blast
have "bij (T^^n)" using bij_fn[OF ‹bij T›] by auto
then have "bij(inv (T^^n))" using bij_imp_bij_inv by auto
then have "bij (Tinv^^n)" unfolding Tinv_def using inv_fn[OF ‹bij T›, of n] by auto
have "(T^^n)-`A ∩ A ≠ {}" using * by auto
(* a surjection pulls back nonempty sets to nonempty sets *)
then have "(Tinv^^n)-`((T^^n)-`A ∩ A) ≠ {}"
using surj_vimage_empty[OF bij_is_surj[OF ‹bij (Tinv^^n)›]] by meson
then have **: "(Tinv^^n)-`((T^^n)-`A) ∩ (Tinv^^n)-` A ≠ {}"
by auto
have "(Tinv^^n)-`((T^^n)-`A) = ((T^^n) o (Tinv^^n))-`A"
by auto
moreover have "(T^^n) o (Tinv^^n) = (λx. x)"
unfolding Tinv_def using ‹bij T› fn_o_inv_fn_is_id by blast
ultimately have "(Tinv^^n)-`((T^^n)-`A) = A" by auto
then have "(Tinv^^n)-` A ∩ A ≠ {}" using ** by auto
then show "∃n>0. (Tinv ^^ n) -` A ∩ A ≠ {}" using ‹n>0› by auto
qed
text ‹We introduce the locale of a conservative measure preserving map.›
locale conservative_mpt = mpt + conservative
(* Introduction rule for conservative_mpt, using the weak (non-emptiness)
   recurrence condition of conservativeI2. *)
lemma conservative_mptI:
assumes "mpt M T"
"⋀A. A ∈ sets M ⟹ emeasure M A > 0 ⟹ ∃n>0. (T^^n)-`A ∩ A ≠ {}"
shows "conservative_mpt M T"
unfolding conservative_mpt_def
apply (auto simp add: assms(1), rule conservativeI2)
using assms(1) by (auto simp add: mpt_def assms(2))
text ‹The fact that finite measure preserving transformations are conservative, albeit easy,
is extremely important. This result is known as the Poincaré recurrence theorem.›
(* Poincaré recurrence theorem: a finite measure preserving transformation is
   conservative.  If A never met its preimages, the preimages B n = (T^n)--`A would be
   pairwise disjoint sets all of measure equal to that of A, so N copies would fit in
   the finite total mass for every N, forcing A to be null. *)
sublocale fmpt ⊆ conservative_mpt
proof (rule conservative_mptI)
show "mpt M T" by (simp add: mpt_axioms)
fix A assume A_meas [measurable]: "A ∈ sets M" and "emeasure M A > 0"
show "∃n>0. (T^^n)-`A ∩ A ≠ {}"
proof (rule ccontr)
assume "¬(∃n>0. (T^^n)-`A ∩ A ≠ {})"
then have disj: "(T^^(Suc n))--`A ∩ A = {}" for n unfolding vimage_restr_def using zero_less_one by blast
define B where "B = (λ n. (T^^n)--`A)"
then have B_meas [measurable]: "B n ∈ sets M" for n by simp
(* measure preservation: every preimage has the same measure as A *)
have same: "measure M (B n) = measure M A" for n
by (simp add: B_def A_meas T_vrestr_same_measure(2))
(* the preimages are pairwise disjoint: an intersection pulls back to one with A itself *)
have "B n ∩ B m = {}" if "n > m" for m n
proof -
have "B n ∩ B m = (T^^m)--` (B (n-m) ∩ A)"
using B_def ‹m < n› A_meas vrestr_intersec T_vrestr_composed(1) by auto
moreover have "B (n-m) ∩ A = {}" unfolding B_def
by (metis disj ‹m < n› Suc_diff_Suc)
ultimately show ?thesis by simp
qed
then have "disjoint_family B" by (metis disjoint_family_on_def inf_sup_aci(1) less_linear)
(* N disjoint copies of measure M A fit into the total mass, for arbitrarily large N *)
have "measure M A < e" if "e>0" for e::real
proof -
obtain N::nat where "N>0" "(measure M (space M))/e<N" using ‹0 < e›
by (metis divide_less_0_iff reals_Archimedean2 less_eq_real_def measure_nonneg not_gr0 not_le of_nat_0)
then have "(measure M (space M))/N < e" using ‹0 < e› ‹N>0›
by (metis bounded_measure div_0 le_less_trans measure_empty mult.commute pos_divide_less_eq)
have *: "disjoint_family_on B {..<N}"
by (meson UNIV_I ‹disjoint_family B› disjoint_family_on_mono subsetI)
then have "(∑i∈{..<N}. measure M (B i)) ≤ measure M (space M)"
by (metis bounded_measure ‹⋀n. B n ∈ sets M›
image_subset_iff finite_lessThan finite_measure_finite_Union)
also have "(∑i∈{..<N}. measure M (B i)) = (∑i∈{..<N}. measure M A)" using same by simp
also have "... = N * (measure M A)" by simp
finally have "N * (measure M A) ≤ measure M (space M)" by simp
then have "measure M A ≤ (measure M (space M))/N" using ‹N>0› by (simp add: mult.commute mult_imp_le_div_pos)
then show "measure M A < e" using ‹(measure M (space M))/N<e› by simp
qed
then have "measure M A ≤ 0" using not_less by blast
then have "measure M A = 0" by (simp add: measure_le_0_iff)
then have "emeasure M A = 0" using emeasure_eq_measure by simp
then show False using ‹emeasure M A > 0› by simp
qed
qed
text ‹The following fact that powers of conservative maps are also conservative is true,
but nontrivial. It is proved as follows: consider a set $A$ with positive measure,
take a time $n_1$ such that $A_1 = T^{-n_1} A \cap A$ has positive measure, then a time
$n_2$ such that $A_2 = T^{-n_2} A_1 \cap A$ has positive measure, and so on. It follows
that $T^{-(n_i+n_{i+1}+\dots+n_j)}A \cap A$ has positive measure for all $i<j$. Then, one
can find $i<j$ such that $n_i+\dots+n_j$ is a multiple of $N$.›
(* Powers of a conservative map are conservative.  Iterating conservativity gives
   good sets B i ⊆ A of positive measure with return times t i; cumulative sums
   s i k of the t's are return times for A.  By pigeonhole on residues mod n, some
   s i k is a positive multiple of n, giving a return time for T^^n. *)
proposition (in conservative) conservative_power:
"conservative M (T^^n)"
proof (unfold_locales)
show "T ^^ n ∈ quasi_measure_preserving M M"
by (auto simp add: Tn_quasi_measure_preserving)
fix A assume [measurable]: "A ∈ sets M" "0 < emeasure M A"
(* good_time K: the least positive i with emeasure ((T^i)-`K ∩ A) > 0;
   next_good_set K: the corresponding subset of A that returns into K *)
define good_time where "good_time = (λK. Inf{(i::nat). i > 0 ∧ emeasure M ((T^^i)-`K ∩ A) > 0})"
define next_good_set where "next_good_set = (λK. (T^^(good_time K))-`K ∩ A)"
(* the recursion step: from a positive-measure K ⊆ A produce a positive-measure
   next_good_set K ⊆ A with a positive return time *)
have good_rec: "((good_time K > 0) ∧ (next_good_set K ⊆ A) ∧
(next_good_set K ∈ sets M) ∧ (emeasure M (next_good_set K) > 0))"
if [measurable]: "K ∈ sets M" and "K ⊆ A" "emeasure M K > 0" for K
proof -
have a: "next_good_set K ∈ sets M" "next_good_set K ⊆ A"
using next_good_set_def by simp_all
obtain k where "k > 0" and posK: "emeasure M ((T^^k)-`K ∩ K) > 0"
using conservative[OF ‹K ∈ sets M›, OF ‹emeasure M K > 0›] by auto
have *:"(T^^k)-`K ∩ K ⊆ (T^^k)-`K ∩ A" using ‹K ⊆ A› by auto
have posKA: "emeasure M ((T^^k)-`K ∩ A) > 0" using emeasure_mono[OF *, of M] posK by simp
let ?S = "{(i::nat). i>0 ∧ emeasure M ((T^^i)-`K ∩ A) > 0}"
have "k ∈ ?S" using ‹k>0› posKA by simp
then have "?S ≠ {}" by auto
then have "Inf ?S ∈ ?S" using Inf_nat_def1[of ?S] by simp
then have "good_time K ∈ ?S" using good_time_def by simp
then show "(good_time K > 0) ∧ (next_good_set K ⊆ A) ∧
(next_good_set K ∈ sets M) ∧ (emeasure M (next_good_set K) > 0)"
using a next_good_set_def by auto
qed
(* iterate: B i is the i-th good set, t i its return time *)
define B where "B = (λi. (next_good_set^^i) A)"
define t where "t = (λi. good_time (B i))"
have good_B: "(B i ⊆ A) ∧ (B i ∈ sets M) ∧ (emeasure M (B i) > 0)" for i
proof (induction i)
case 0
have "B 0 = A" using B_def by simp
then show ?case using ‹B 0 = A› ‹A ∈ sets M› ‹emeasure M A > 0› by simp
next
case (Suc i)
moreover have "B (i+1) = next_good_set (B i)" using B_def by simp
ultimately show ?case using good_rec[of "B i"] by auto
qed
have t_pos: "⋀i. t i > 0" using t_def by (simp add: good_B good_rec)
(* s i k = t i + ... + t (i+k-1): B (i+k) returns to A after exactly s i k steps *)
define s where "s = (λi k. (∑n ∈ {i..<i+k}. t n))"
have "B (i+k) ⊆ (T^^(s i k))-`A ∩ A" for i k
proof (induction k)
case 0
show ?case using s_def good_B[of i] by simp
next
case (Suc k)
have "B(i+k+1) = (T^^(t (i+k)))-`(B (i+k)) ∩ A" using t_def B_def next_good_set_def by simp
moreover have "B(i+k) ⊆ (T^^(s i k))-`A" using Suc.IH by simp
ultimately have "B(i+k+1) ⊆ (T^^(t (i+k)))-` (T^^(s i k))-`A ∩ A" by auto
then have "B(i+k+1) ⊆ (T^^(t(i+k) + s i k))-`A ∩ A" by (simp add: add.commute funpow_add vimage_comp)
moreover have "t(i+k) + s i k = s i (k+1)" using s_def by simp
ultimately show ?case by simp
qed
moreover have "(T^^j)-`A ∩ A ∈ sets M" for j by simp
ultimately have *: "emeasure M ((T^^(s i k))-`A ∩ A) > 0" for i k
by (metis inf.orderE inf.strict_boundedE good_B emeasure_mono)
show "∃k>0. 0 < emeasure M (((T ^^ n) ^^ k) -` A ∩ A)"
proof (cases)
assume "n = 0"
then have "((T ^^ n) ^^ 1) -` A = A" by simp
then show ?thesis using ‹emeasure M A > 0› by auto
next
assume "¬(n = 0)"
then have "n > 0" by simp
(* pigeonhole on residues: two partial sums s 0 i and s 0 (i+k) agree mod n,
   so their difference s i k is a positive multiple n * r of n *)
define u where "u = (λi. s 0 i mod n)"
have "range u ⊆ {..<n}" by (simp add: ‹0 < n› image_subset_iff u_def)
then have "finite (range u)" using finite_nat_iff_bounded by auto
then have "∃i j. (i<j) ∧ (u i = u j)" by (metis finite_imageD infinite_UNIV_nat injI less_linear)
then obtain i k where "k>0" "u i = u (i+k)" using less_imp_add_positive by blast
moreover have "s 0 (i+k) = s 0 i + s i k" unfolding s_def by (simp add: sum.atLeastLessThan_concat)
ultimately have "(s i k) mod n = 0" using u_def nat_mod_cong by metis
then obtain r where "s i k = n * r" by auto
moreover have "s i k > 0" unfolding s_def
using ‹k > 0› t_pos sum_strict_mono[of "{i..<i+k}", of "λx. 0", of "λx. t x"] by simp
ultimately have "r > 0" by simp
moreover have "emeasure M ((T^^(n * r))-`A ∩ A) > 0" using * ‹s i k = n * r› by metis
ultimately show ?thesis by (metis funpow_mult)
qed
qed
(* Powers of a conservative mpt are conservative mpts, combining
   conservative_power with mpt_power. *)
proposition (in conservative_mpt) conservative_mpt_power:
"conservative_mpt M (T^^n)"
using conservative_power mpt_power unfolding conservative_mpt_def by auto
text ‹The standard way to use conservativity is as follows: if a set is almost disjoint from
all its preimages, then it is null:›
(* Main usage pattern of conservativity: a set that is almost disjoint from all its
   preimages (each intersection is null) must itself be null. *)
lemma (in conservative) ae_disjoint_then_null:
assumes "A ∈ sets M"
"⋀n. n > 0 ⟹ A ∩ (T^^n)-`A ∈ null_sets M"
shows "A ∈ null_sets M"
by (metis Int_commute assms(1) assms(2) conservative zero_less_iff_neq_zero null_setsD1 null_setsI)
(* Special case: a set genuinely disjoint from all its preimages is null. *)
lemma (in conservative) disjoint_then_null:
assumes "A ∈ sets M"
"⋀n. n > 0 ⟹ A ∩ (T^^n)-`A = {}"
shows "A ∈ null_sets M"
by (rule ae_disjoint_then_null, auto simp add: assms)
text ‹Conservativity is preserved by replacing the measure by an equivalent one.›
(* Conservativity only depends on the measure class: it is preserved when the measure
   is replaced by an equivalent one, i.e., a density with a.e. finite nonzero weight,
   because the null sets (and hence the recurrence condition of conservativeI3) agree. *)
lemma (in conservative) conservative_density:
assumes [measurable]: "h ∈ borel_measurable M"
and "AE x in M. h x ≠ 0" "AE x in M. h x ≠ ∞"
shows "conservative (density M h) T"
proof -
interpret A: qmpt "density M h" T
by (rule qmpt_density[OF assms])
show ?thesis
apply (rule conservativeI3) apply (simp add: A.qmpt_axioms)
unfolding sets_density null_sets_density[OF assms(1) assms(2)]
by (metis conservative emeasure_empty not_gr_zero null_setsI)
qed
context qmpt begin
text ‹We introduce the recurrent subset of $A$, i.e., the set of points of $A$ that return to
$A$, and the infinitely recurrent subset, i.e., the set of points of $A$ that return
infinitely often to $A$. In conservative systems, both coincide with $A$ almost everywhere.›
(* Points of A that come back to A at least once (at some positive time). *)
definition recurrent_subset::"'a set ⇒ 'a set"
where "recurrent_subset A = (⋃n ∈ {1..}. A ∩ (T^^n)-`A)"
(* Points of A that return to A infinitely often: remove from A every point whose
   orbit eventually lands in A without ever returning afterwards. *)
definition recurrent_subset_infty::"'a set ⇒ 'a set"
where "recurrent_subset_infty A = A - (⋃n. (T^^n)-` (A - recurrent_subset A))"
(* Characterization: x is in the infinitely recurrent subset of A iff x ∈ A and the
   set of return times {n. T^n x ∈ A} is infinite. *)
lemma recurrent_subset_infty_inf_returns:
"x ∈ recurrent_subset_infty A ⟷ (x ∈ A ∧ infinite {n. (T^^n) x ∈ A})"
proof
assume *: "x ∈ recurrent_subset_infty A"
have "infinite {n. (T^^n) x ∈ A}"
proof (rule ccontr)
assume "¬(infinite {n. (T^^n) x ∈ A})"
then have F: "finite {n. (T^^n) x ∈ A}" by auto
have "0 ∈ {n. (T^^n) x ∈ A}" using * recurrent_subset_infty_def by auto
then have NE: "{n. (T^^n) x ∈ A} ≠ {}" by blast
(* look at the last visit time N: the definition forces T^N x to still be recurrent,
   producing a later visit and contradicting maximality of N *)
define N where "N = Max {n. (T^^n) x ∈ A}"
have "N ∈ {n. (T^^n) x ∈ A}" unfolding N_def using F NE using Max_in by auto
then have "(T^^N) x ∈ A" by auto
moreover have "x ∉ (T^^N)-` (A - recurrent_subset A)" using * unfolding recurrent_subset_infty_def by auto
ultimately have "(T^^N) x ∈ recurrent_subset A" by auto
then have "(T ^^ N) x ∈ A ∧ (∃n. n ∈ {1..} ∧ (T ^^ n) ((T ^^ N) x) ∈ A)"
unfolding recurrent_subset_def by blast
then obtain n where "n>0" "(T^^n) ((T^^N) x) ∈ A"
by (metis atLeast_iff gr0I not_one_le_zero)
then have "n+N ∈ {n. (T^^n) x ∈ A}" by (simp add: funpow_add)
then show False unfolding N_def using ‹n>0› F NE
by (metis Max_ge Nat.add_0_right add.commute nat_add_left_cancel_less not_le)
qed
then show "x ∈ A ∧ infinite {n. (T^^n) x ∈ A}" using * recurrent_subset_infty_def by auto
next
assume *: "(x ∈ A ∧ infinite {n. (T ^^ n) x ∈ A})"
{
(* conversely: after any time n, infinitely many returns provide a later visit,
   so T^n x is never a non-returning point of A *)
fix n
obtain N where "N>n" "(T^^N) x ∈ A" using *
using infinite_nat_iff_unbounded by force
define k where "k = N-n"
then have "k>0" "N = n+k" using ‹N>n› by auto
then have "(T^^k) ((T^^n) x) ∈ A"
by (metis ‹(T ^^ N) x ∈ A› ‹N = n + k› add.commute comp_def funpow_add)
then have "(T^^ n) x ∉ A - recurrent_subset A"
unfolding recurrent_subset_def using ‹k>0› by auto
}
then show "x ∈ recurrent_subset_infty A" unfolding recurrent_subset_infty_def using * by auto
qed
(* For an infinitely recurrent point, the (ennreal-valued) series counting visits
   to A diverges: any N visits give a partial sum >= N. *)
lemma recurrent_subset_infty_series_infinite:
assumes "x ∈ recurrent_subset_infty A"
shows "(∑n. indicator A ((T^^n) x)) = (∞::ennreal)"
proof (rule ennreal_ge_nat_imp_PInf)
have *: "¬ finite {n. (T^^n) x ∈ A}" using recurrent_subset_infty_inf_returns assms by auto
fix N::nat
(* pick N distinct return times; the corresponding indicators each contribute 1 *)
obtain F where F: "finite F" "F ⊆ {n. (T^^n) x ∈ A}" "card F = N"
using infinite_arbitrarily_large[OF *] by blast
have "N = (∑n ∈ F. 1::ennreal)"
using F(3) by auto
also have "... = (∑n ∈ F. (indicator A ((T^^n) x))::ennreal)"
apply (rule sum.cong) using F(2) indicator_def by auto
also have "... ≤ (∑n. indicator A ((T^^n) x))"
by (rule sum_le_suminf, auto simp add: F)
finally show "N ≤ (∑n. (indicator A ((T^^n) x))::ennreal)" by auto
qed
(* Alternative (limsup-style) formula for the infinitely recurrent subset:
   for every m there is a return time >= m.  Convenient for measurability. *)
lemma recurrent_subset_infty_def':
"recurrent_subset_infty A = (⋂m. (⋃n∈{m..}. A ∩ (T^^n)-`A))"
proof (auto)
fix x assume x: "x ∈ recurrent_subset_infty A"
then show "x ∈ A" unfolding recurrent_subset_infty_def by auto
fix N::nat
show "∃n∈{N..}. (T^^n) x ∈ A" using recurrent_subset_infty_inf_returns x
using infinite_nat_iff_unbounded_le by auto
next
fix x assume "x ∈ A" "∀N. ∃n∈{N..}. (T^^n) x ∈ A"
then show "x ∈ recurrent_subset_infty A"
unfolding recurrent_subset_infty_inf_returns using infinite_nat_iff_unbounded_le by auto
qed
(* Basic inclusions: both recurrent subsets sit inside A, and infinite recurrence
   implies at least one return. *)
lemma recurrent_subset_incl:
"recurrent_subset A ⊆ A"
"recurrent_subset_infty A ⊆ A"
"recurrent_subset_infty A ⊆ recurrent_subset A"
unfolding recurrent_subset_def recurrent_subset_infty_def' by (simp, simp, fast)
(* Measurability of both recurrent subsets, from their countable union/intersection
   descriptions. *)
lemma recurrent_subset_meas [measurable]:
assumes [measurable]: "A ∈ sets M"
shows "recurrent_subset A ∈ sets M"
"recurrent_subset_infty A ∈ sets M"
unfolding recurrent_subset_def recurrent_subset_infty_def' by measurable
(* Monotonicity: enlarging A enlarges both recurrent subsets (returns to A are in
   particular returns to any B ⊇ A). *)
lemma recurrent_subset_rel_incl:
assumes "A ⊆ B"
shows "recurrent_subset A ⊆ recurrent_subset B"
"recurrent_subset_infty A ⊆ recurrent_subset_infty B"
proof -
show "recurrent_subset A ⊆ recurrent_subset B"
unfolding recurrent_subset_def using assms by auto
show "recurrent_subset_infty A ⊆ recurrent_subset_infty B"
apply (auto, subst recurrent_subset_infty_inf_returns)
using assms recurrent_subset_incl(2) infinite_nat_iff_unbounded_le recurrent_subset_infty_inf_returns by fastforce
qed
text ‹If a point belongs to the infinitely recurrent subset of $A$, then when they return to $A$
its iterates also belong to the infinitely recurrent subset.›
lemma recurrent_subset_infty_returns:
assumes "x ∈ recurrent_subset_infty A" "(T^^n) x ∈ A"
shows "(T^^n) x ∈ recurrent_subset_infty A"
proof (subst recurrent_subset_infty_inf_returns, rule ccontr)
(* Suppose by contradiction that T^n x returns to A only finitely many times;
we derive that x itself would have only finitely many returns to A. *)
assume "¬ ((T ^^ n) x ∈ A ∧ infinite {k. (T ^^ k) ((T ^^ n) x) ∈ A})"
then have 1: "finite {k. (T^^k) ((T^^n) x) ∈ A}" using assms(2) by auto
have "0 ∈ {k. (T^^k) ((T^^n) x) ∈ A}" using assms(2) by auto
then have 2: "{k. (T^^k) ((T^^n) x) ∈ A} ≠ {}" by blast
(* M is the last return time of T^n x to A *)
define M where "M = Max {k. (T^^k) ((T^^n) x) ∈ A}"
have M_prop: "⋀k. k > M ⟹ (T^^k) ((T^^n) x) ∉ A"
unfolding M_def using 1 2 by auto
{
(* hence every visit of x to A must occur before time n + M *)
fix N assume *: "(T^^N) x ∈ A"
have "N ≤ n+M"
proof (cases)
assume "N ≤ n"
then show ?thesis by auto
next
assume "¬(N ≤ n)"
then have "N > n" by simp
define k where "k = N-n"
have "N = n + k" unfolding k_def using ‹N > n› by auto
then have "(T^^k) ((T^^n)x) ∈ A" using * by (simp add: add.commute funpow_add)
then have "k ≤ M" using M_prop using not_le by blast
then show ?thesis unfolding k_def by auto
qed
}
then have "finite {N. (T^^N) x ∈ A}"
by (metis (no_types, lifting) infinite_nat_iff_unbounded mem_Collect_eq not_less)
(* ...contradicting the assumption that x is infinitely recurrent *)
moreover have "infinite {N. (T^^N) x ∈ A}"
using recurrent_subset_infty_inf_returns assms(1) by auto
ultimately show False by auto
qed
text ‹Taking the infinitely recurrent subset is an idempotent operation.›
lemma recurrent_subset_of_recurrent_subset:
"recurrent_subset_infty(recurrent_subset_infty A) = recurrent_subset_infty A"
proof
(* one inclusion follows from monotonicity, the other from the previous lemma *)
show "recurrent_subset_infty (recurrent_subset_infty A) ⊆ recurrent_subset_infty A"
using recurrent_subset_incl(2)[of A] recurrent_subset_rel_incl(2) by auto
show "recurrent_subset_infty A ⊆ recurrent_subset_infty (recurrent_subset_infty A)"
using recurrent_subset_infty_returns recurrent_subset_infty_inf_returns
by (metis (no_types, lifting) Collect_cong subsetI)
qed
text ‹The Poincare recurrence theorem states that almost every point of $A$ returns
(infinitely often) to $A$, i.e., the recurrent and infinitely recurrent subsets of $A$
coincide almost everywhere with $A$. This is essentially trivial in conservative systems,
as it is a reformulation of the definition of conservativity. (What is not trivial, and has been
proved above, is that it is true in finite measure preserving systems, i.e., finite measure
preserving systems are automatically conservative.)›
theorem (in conservative) Poincare_recurrence_thm:
assumes [measurable]: "A ∈ sets M"
shows "A - recurrent_subset A ∈ null_sets M"
"A - recurrent_subset_infty A ∈ null_sets M"
"A Δ recurrent_subset A ∈ null_sets M"
"A Δ recurrent_subset_infty A ∈ null_sets M"
"emeasure M (recurrent_subset A) = emeasure M A"
"emeasure M (recurrent_subset_infty A) = emeasure M A"
"AE x ∈ A in M. x ∈ recurrent_subset_infty A"
proof -
(* B is the set of points of A that never return to A; conservativity makes it
null, via disjoint_then_null. *)
define B where "B = {x ∈ A. ∀ n∈{1..}. (T^^n) x ∈ (space M - A)}"
have rs: "recurrent_subset A = A - B"
by (auto simp add: B_def recurrent_subset_def)
(meson Tn_meas assms measurable_space sets.sets_into_space subsetCE)
then have *: "A - recurrent_subset A = B" using B_def by blast
have "B ∈ null_sets M"
by (rule disjoint_then_null, auto simp add: B_def)
then show "A - recurrent_subset A ∈ null_sets M" using * by simp
(* The non-infinitely-recurrent points of A are covered by the preimages of the
null set A - recurrent_subset A, which form a null set by quasi-preservation. *)
then have *: "(⋃n. (T^^n)--`(A-recurrent_subset A)) ∈ null_sets M"
using T_quasi_preserves_null2(2) by blast
have "recurrent_subset_infty A = recurrent_subset_infty A ∩ space M" using sets.sets_into_space by auto
also have "... = A ∩ space M - (⋃n. (T^^n)-`(A-recurrent_subset A) ∩ space M)" unfolding recurrent_subset_infty_def by blast
also have "... = A - (⋃n. (T^^n)--`(A-recurrent_subset A))" unfolding vimage_restr_def using sets.sets_into_space by auto
finally have **: "recurrent_subset_infty A = A - (⋃n. (T ^^ n) --` (A - recurrent_subset A))" .
then have "A - recurrent_subset_infty A ⊆ (⋃n. (T^^n)--`(A-recurrent_subset A))" by auto
with * ** show "A - recurrent_subset_infty A ∈ null_sets M"
by (simp add: Diff_Diff_Int null_set_Int1)
(* The symmetric-difference and emeasure statements follow since the recurrent
subsets are included in A and differ from it by a null set. *)
have "A Δ recurrent_subset A = A - recurrent_subset A" using recurrent_subset_incl(1)[of A] by blast
then show "A Δ recurrent_subset A ∈ null_sets M" using ‹A - recurrent_subset A ∈ null_sets M› by auto
then show "emeasure M (recurrent_subset A) = emeasure M A"
by (rule Delta_null_same_emeasure[symmetric], auto)
have "A Δ recurrent_subset_infty A = A - recurrent_subset_infty A" using recurrent_subset_incl(2)[of A] by blast
then show "A Δ recurrent_subset_infty A ∈ null_sets M" using ‹A - recurrent_subset_infty A ∈ null_sets M› by auto
then show "emeasure M (recurrent_subset_infty A) = emeasure M A"
by (rule Delta_null_same_emeasure[symmetric], auto)
show "AE x∈A in M. x ∈ recurrent_subset_infty A"
unfolding eventually_ae_filter
by (metis (no_types, lifting) DiffI ‹A - recurrent_subset_infty A ∈ null_sets M› mem_Collect_eq subsetI)
qed
text ‹A convenient way to use conservativity is given in the following theorem: if $T$ is
conservative, then the series $\sum_n f(T^n x)$ is infinite for almost every $x$ with $f x > 0$.
When $f$ is an indicator function, this is the fact that, starting from $B$, one returns
infinitely many times to $B$ almost surely. The general case follows by approximating $f$ from
below by constants time indicators.›
theorem (in conservative) recurrence_series_infinite:
fixes f::"'a ⇒ ennreal"
assumes [measurable]: "f ∈ borel_measurable M"
shows "AE x in M. f x > 0 ⟶ (∑n. f ((T^^n) x)) = ∞"
proof -
(* Step 1: the statement for a fixed positive level epsilon, applying Poincare
recurrence to the level set B = {f > epsilon}. *)
have *: "AE x in M. f x > epsilon ⟶ (∑n. f ((T^^n) x)) = ⊤" if "epsilon > 0" for epsilon
proof -
define B where "B = {x ∈ space M. f x > epsilon}"
have [measurable]: "B ∈ sets M" unfolding B_def by auto
have "(∑n. f ((T^^n) x)) = ∞" if "x ∈ recurrent_subset_infty B" for x
proof -
(* the series dominates epsilon times the (infinite) number of visits to B *)
have "∞ = epsilon * ∞" using ‹epsilon > 0› ennreal_mult_top by auto
also have "... = epsilon * (∑n. indicator B ((T^^n) x))"
using recurrent_subset_infty_series_infinite[OF that] by simp
also have "... = (∑n. epsilon * indicator B ((T^^n) x))"
by auto
also have "... ≤ (∑n. f ((T^^n) x))"
apply (rule suminf_le) unfolding indicator_def B_def by auto
finally show ?thesis
by (simp add: dual_order.antisym)
qed
moreover have "AE x in M. f x > epsilon ⟶ x ∈ recurrent_subset_infty B"
using Poincare_recurrence_thm(7)[OF ‹B ∈ sets M›] unfolding B_def by auto
ultimately show ?thesis by auto
qed
(* Step 2: exhaust the condition f x > 0 by a sequence of levels u n tending to 0 *)
have "∃u::(nat ⇒ ennreal). (∀n. u n > 0) ∧ u ⇢ 0"
by (meson approx_from_above_dense_linorder ex_gt_or_lt gr_implies_not_zero)
then obtain u::"nat ⇒ ennreal" where u: "⋀n. u n > 0" "u ⇢ 0"
by auto
have "AE x in M. (∀n::nat. (f x > u n ⟶ (∑n. f ((T^^n) x)) = ⊤))"
unfolding AE_all_countable using u by (auto intro!: *)
moreover have "f x > 0 ⟶ (∑n. f ((T^^n) x)) = ∞" if "(∀n::nat. (f x > u n ⟶ (∑n. f ((T^^n) x)) = ⊤))" for x
proof (auto)
assume "f x > 0"
(* since u tends to 0, some level u n lies strictly below f x *)
obtain n where "u n < f x"
using order_tendstoD(2)[OF u(2) ‹f x > 0›] eventually_False_sequentially eventually_mono by blast
then show "(∑n. f ((T^^n) x)) = ⊤" using that by auto
qed
ultimately show ?thesis by auto
qed
subsection ‹The first return time›
text ‹The first return time to a set $A$ under the dynamics $T$ is the smallest integer $n$ such
that $T^n(x) \in A$. The first return time is only well defined
on the recurrent subset of $A$, elsewhere we set it to $0$ for definiteness.
We can partition $A$ according to the value of the return time on it, thus defining
the return partition of $A$.›
(* The first return time is the infimum of the positive times at which the orbit
comes back to A; it is set to 0 outside the recurrent subset of A. *)
definition return_time_function::"'a set ⇒ ('a ⇒ nat)"
where "return_time_function A x = (
if (x ∈ recurrent_subset A) then (Inf {n::nat∈{1..}. (T^^n) x ∈ A})
else 0)"
(* return_partition A k: the points of A whose first return to A occurs exactly at
time k (they are in A, return at time k, and at no earlier positive time). *)
definition return_partition::"'a set ⇒ nat ⇒ 'a set"
where "return_partition A k = A ∩ (T^^k)--`A - (⋃i∈{0<..<k}. (T^^i)--`A)"
text ‹Basic properties of the return partition.›
lemma return_partition_basics:
assumes A_meas [measurable]: "A ∈ sets M"
shows [measurable]: "return_partition A n ∈ sets M"
and "disjoint_family (λn. return_partition A (n+1))"
"(⋃n. return_partition A (n+1)) = recurrent_subset A"
proof -
show "return_partition A n ∈ sets M" for n unfolding return_partition_def by auto
(* the pieces of the return partition are the disjointed family of the sets
B n of points of A returning at time n+1 *)
define B where "B = (λn. A ∩ (T^^(n+1))--`A)"
have "return_partition A (n+1) = B n -(⋃i∈{0..<n}. B i)" for n
unfolding return_partition_def B_def by (auto) (auto simp add: less_Suc_eq_0_disj)
then have *: "⋀n. return_partition A (n+1) = disjointed B n" using disjointed_def[of B] by simp
then show "disjoint_family (λn. return_partition A (n+1))" using disjoint_family_disjointed by simp
(* the union of the pieces recovers the recurrent subset *)
have "A ∩ (T^^n)-`A = A ∩ (T^^n)--`A" for n
using sets.sets_into_space[OF A_meas] by auto
then have "recurrent_subset A = (⋃n∈ {1..}. A ∩ (T^^n)--`A)" unfolding recurrent_subset_def by simp
also have "... = (⋃n. B n)" by (simp add: B_def atLeast_Suc_greaterThan greaterThan_0)
also have "... = (⋃n. return_partition A (n+1))" using * UN_disjointed_eq[of B] by simp
finally show "(⋃n. return_partition A (n+1)) = recurrent_subset A" by simp
qed
text ‹Basic properties of the return time, relationship with the return partition.›
text ‹The return time vanishes exactly outside the recurrent subset.›
lemma return_time0:
"(return_time_function A)-`{0} = UNIV - recurrent_subset A"
proof (auto)
fix x
assume *: "x ∈ recurrent_subset A" "return_time_function A x = 0"
(* on the recurrent subset, the return time is an Inf over a nonempty set of
positive integers, hence positive: contradiction *)
define K where "K = {n::nat∈{1..}. (T^^n) x ∈ A}"
have **: "return_time_function A x = Inf K"
using K_def return_time_function_def * by simp
have "K ≠ {}" using K_def recurrent_subset_def * by auto
moreover have "0 ∉ K" using K_def by auto
ultimately have "Inf K >0"
by (metis (no_types, lifting) K_def One_nat_def atLeast_iff cInf_lessD mem_Collect_eq neq0_conv not_le zero_less_Suc)
then have "return_time_function A x > 0" using ** by simp
then show "False" using * by simp
qed (auto simp add: return_time_function_def)
text ‹The level sets of the return time are exactly the pieces of the return partition.›
lemma return_time_n:
assumes [measurable]: "A ∈ sets M"
shows "(return_time_function A)-`{Suc n} = return_partition A (Suc n)"
proof (auto)
(* direction 1: return time Suc n forces a first return exactly at time Suc n *)
fix x assume *: "return_time_function A x = Suc n"
then have rx: "x ∈ recurrent_subset A" using return_time_function_def by (auto, meson Zero_not_Suc)
define K where "K = {i∈{1..}. (T^^i) x ∈ A}"
have "return_time_function A x = Inf K" using return_time_function_def rx K_def by auto
then have "Inf K = Suc n" using * by simp
moreover have "K ≠ {}" using rx recurrent_subset_def K_def by auto
ultimately have "Suc n ∈ K" using Inf_nat_def1[of K] by simp
then have "(T^^(Suc n))x ∈ A" using K_def by auto
then have a: "x ∈ A ∩ (T^^(Suc n))--`A"
using rx recurrent_subset_incl[of A] sets.sets_into_space[OF assms] by auto
(* no earlier positive time can be a return time, as Inf K = Suc n *)
have "⋀i. i∈{1..<Suc n} ⟹ i ∉ K" using cInf_lower ‹Inf K = Suc n› by force
then have "⋀i. i∈{1..<Suc n} ⟹ x ∉ (T^^i)--`A" using K_def by auto
then have "x ∉ (⋃i∈{1..<Suc n}. (T^^i)--`A)" by auto
then show "x ∈ return_partition A (Suc n)" using a return_partition_def by simp
next
(* direction 2: membership in the partition piece pins down Inf K *)
fix x assume *: "x ∈ return_partition A (Suc n)"
then have a: "x ∈ space M" unfolding return_partition_def using vimage_restr_def by blast
define K where "K = {i::nat∈{1..}. (T^^i) x ∈ A}"
have "Inf K = Suc n"
apply (rule cInf_eq_minimum) using * by (auto simp add: a assms K_def return_partition_def)
have "x ∈ recurrent_subset A" using * return_partition_basics(3)[OF assms] by auto
then show "return_time_function A x = Suc n"
using return_time_function_def K_def ‹Inf K = Suc n› by auto
qed
text ‹The return time is measurable.›
lemma return_time_function_meas [measurable]:
assumes [measurable]: "A ∈ sets M"
shows "return_time_function A ∈ measurable M (count_space UNIV)"
"return_time_function A ∈ borel_measurable M"
proof -
(* it suffices to check measurability of each level set, distinguishing
the value 0 (complement of the recurrent subset) from Suc n (partition piece) *)
have "(return_time_function A)-`{n} ∩ space M ∈ sets M" for n
proof (cases "n = 0")
case True
then show ?thesis using return_time0 recurrent_subset_meas[OF assms] by auto
next
case False
show ?thesis
using return_time_n return_partition_basics(1)[OF assms] not0_implies_Suc[OF False] by auto
qed
then show "return_time_function A ∈ measurable M (count_space UNIV)"
by (simp add: measurable_count_space_eq2_countable assms)
then show "return_time_function A ∈ borel_measurable M"
using measurable_cong_sets sets_borel_eq_count_space by blast
qed
text ‹A close cousin of the return time and the return partition is the first entrance set:
we partition the space according to the first positive time where a point enters $A$.›
(* the orbit hits A at time n, but at no time i < n *)
definition first_entrance_set::"'a set ⇒ nat ⇒ 'a set"
where "first_entrance_set A n = (T^^n) --` A - (⋃ i<n. (T^^i)--`A)"
text ‹Measurability of the first entrance sets.›
lemma first_entrance_meas [measurable]:
assumes [measurable]: "A ∈ sets M"
shows "first_entrance_set A n ∈ sets M"
unfolding first_entrance_set_def by measurable
text ‹The first entrance sets form a partition: they are the disjointed family of
the preimages of $A$.›
lemma first_entrance_disjoint:
"disjoint_family (first_entrance_set A)"
proof -
have "first_entrance_set A = disjointed (λi. (T^^i)--`A)"
by (auto simp add: disjointed_def first_entrance_set_def)
then show ?thesis by (simp add: disjoint_family_disjointed)
qed
text ‹There is an important dynamical phenomenon: if a point has first entrance time equal to
$n$, then their preimages either have first entrance time equal to $n+1$ (these are the preimages
not in $A$) or they belong to $A$ and have first return time equal to $n+1$. When $T$ preserves the
measure, this gives an inductive control on the measure of the first entrance set, that will be
used again and again in the proof of Kac's Formula. We formulate these (simple but extremely useful)
facts now.›
lemma first_entrance_rec:
assumes [measurable]: "A ∈ sets M"
shows "first_entrance_set A (Suc n) = T--`(first_entrance_set A n) - A"
proof -
have A0: "A = (T^^0)--`A" by auto
have "first_entrance_set A n = (T^^n) --` A - (⋃ i<n. (T^^i)--`A)"
using first_entrance_set_def by simp
(* pull the first entrance set at time n back by one step of T *)
then have "T--`(first_entrance_set A n) = (T^^(n+1))--`A - (⋃ i<n. (T^^(i+1))--`A)"
using T_vrestr_composed(2) ‹A ∈ sets M› by simp
then have *: "T--`(first_entrance_set A n) - A = (T^^(n+1))--`A - (A ∪ (⋃ i<n. (T^^(i+1))--`A))"
by blast
(* reindex the excluded times: {1..n} together with time 0 gives {0..n} *)
have "(⋃ i<n. (T^^(i+1))--`A) = (⋃ j∈{1..<n+1}. (T^^j)--`A)"
by (rule UN_le_add_shift_strict)
then have "A ∪ (⋃ i<n. (T^^(i+1))--`A) = (⋃ j∈{0..<n+1}. (T^^j)--`A)"
by (metis A0 Un_commute atLeast0LessThan UN_le_eq_Un0_strict)
then show ?thesis using * first_entrance_set_def by auto
qed
text ‹The points of $A$ with return time $n+1$ are exactly the points of $A$ whose image
has first entrance time $n$.›
lemma return_time_rec:
assumes "A ∈ sets M"
shows "(return_time_function A)-`{Suc n} = T--`(first_entrance_set A n) ∩ A"
proof -
have "return_partition A (Suc n) = T--`(first_entrance_set A n) ∩ A"
unfolding return_partition_def first_entrance_set_def
by (auto simp add: T_vrestr_composed[OF assms]) (auto simp add: less_Suc_eq_0_disj)
then show ?thesis using return_time_n[OF assms] by simp
qed
subsection ‹Local time controls›
text ‹The local time is the time that an orbit spends in a given set. Local time controls
are basic to all the forthcoming developments.›
(* local_time A n x: number of visits of the orbit of x to A before time n *)
definition local_time::"'a set ⇒ nat ⇒ 'a ⇒ nat"
where "local_time A n x = card {i∈{..<n}. (T^^i) x ∈ A}"
text ‹The local time is the Birkhoff sum of the indicator function of $A$.›
lemma local_time_birkhoff:
"local_time A n x = birkhoff_sum (indicator A) n x"
proof (induction n)
case 0
then show ?case unfolding local_time_def birkhoff_sum_def by simp
next
case (Suc n)
(* induction step: the local time increases by the indicator of A at T^n x *)
have "local_time A (n+1) x = local_time A n x + indicator A ((T^^n) x)"
proof (cases)
(* case 1: T^n x is in A, one more visit is counted *)
assume *: "(T^^n) x ∈ A"
then have "{i∈{..<Suc n}. (T^^i) x ∈ A} = {i∈{..<n}. (T^^i) x ∈ A} ∪ {n}"
by auto
then have "card {i∈{..<Suc n}. (T^^i) x ∈ A} = card {i∈{..<n}. (T^^i) x ∈ A} + card {n}"
using card_Un_disjoint by auto
then have "local_time A (n+1) x = local_time A n x + 1" using local_time_def by simp
moreover have "indicator A ((T^^n)x) = (1::nat)" using * indicator_def by auto
ultimately show ?thesis by simp
next
(* case 2: T^n x is not in A, the count is unchanged *)
assume *: "¬((T^^n) x ∈ A)"
then have "{i∈{..<Suc n}. (T^^i) x ∈ A} = {i∈{..<n}. (T^^i) x ∈ A}" using less_Suc_eq by force
then have "card {i∈{..<Suc n}. (T^^i) x ∈ A} = card {i∈{..<n}. (T^^i) x ∈ A}"
by auto
then have "local_time A (n+1) x = local_time A n x" using local_time_def by simp
moreover have "indicator A ((T^^n)x) = (0::nat)" using * indicator_def by auto
ultimately show ?thesis by simp
qed
then have "local_time A (n+1) x = birkhoff_sum (indicator A) n x + indicator A ((T^^n) x)"
using Suc.IH by auto
moreover have "birkhoff_sum (indicator A) (n+1) x = birkhoff_sum (indicator A) n x + indicator A ((T^^n) x)"
by (metis birkhoff_sum_cocycle[where ?n = "n" and ?m = "1"] birkhoff_sum_1(2))
ultimately have "local_time A (n+1) x = birkhoff_sum (indicator A) (n+1) x" by metis
then show ?case by (metis Suc_eq_plus1)
qed
text ‹Measurability of the local time, via its expression as a Birkhoff sum.›
lemma local_time_meas [measurable]:
assumes [measurable]: "A ∈ sets M"
shows "local_time A n ∈ borel_measurable M"
unfolding local_time_birkhoff by auto
text ‹The local time satisfies the cocycle equality, inherited from Birkhoff sums.›
lemma local_time_cocycle:
"local_time A n x + local_time A m ((T^^n)x) = local_time A (n+m) x"
by (metis local_time_birkhoff birkhoff_sum_cocycle)
text ‹The local time is nondecreasing in the time parameter.›
lemma local_time_incseq:
"incseq (λn. local_time A n x)"
using local_time_cocycle incseq_def by (metis le_iff_add)
text ‹One-step recurrence for the local time.›
lemma local_time_Suc:
"local_time A (n+1) x = local_time A n x + indicator A ((T^^n)x)"
by (metis local_time_birkhoff birkhoff_sum_cocycle birkhoff_sum_1(2))
text ‹The local time is bounded by $n$: at most, one returns to $A$ all the time!›
lemma local_time_bound:
"local_time A n x ≤ n"
proof -
(* the set of visit times is a subset of {..<n}, which has cardinality n *)
have "card {i∈{..<n}. (T^^i) x ∈ A} ≤ card {..<n}" by (rule card_mono, auto)
then show ?thesis unfolding local_time_def by auto
qed
text ‹The fact that local times are unbounded will be the main technical tool in the proof
of recurrence results or Kac formula below. In this direction, we prove more and more general
results in the lemmas below.
We show that, in $T^{-n}(A)$, the number of visits to $A$ tends to infinity in
measure, when $A$ has finite measure. In other words, the points in $T^{-n}(A)$ with
local time $<k$ have a measure tending to $0$ with $k$. The argument, by induction on $k$, goes
as follows.
Consider the last return to $A$ before time $n$, say at time $n-i$. It lands in the set $S_i$ with
return time $i$. We get $T^{-n} A \subseteq \bigcup_{i<N} T^{-(n-i)}S_i \cup R$, where the union
is disjoint and $R$ is a set of measure $\mu(T^{-n}A) - \sum_{i<N} \mu(T^{-(n-i)}S_i)
= \mu(A) - \sum_{i<N} \mu(S_i)$, which tends to $0$ with $N$ and that we may therefore discard.
A point with local time $<k$ at time $n$ in $T^{-n}A$ is then a point with local time $<k-1$
at time $n-i$ in $T^{-(n-i)}S_i \subseteq T^{-(n-i)}A$. Hence, we may conclude by the induction
assumption that this has small measure.›
lemma (in conservative_mpt) local_time_unbounded1:
assumes A_meas [measurable]: "A ∈ sets M"
and fin: "emeasure M A < ∞"
shows "(λn. emeasure M {x ∈ (T^^n)--`A. local_time A n x < k}) ⇢ 0"
proof (induction k)
case 0
have "{x ∈ (T^^n)--`A. local_time A n x < 0} = {}" for n by simp
then show ?case by simp
next
case (Suc k)
(* K p n: the "bad" points of T^-n A with local time below p at time n *)
define K where "K = (λp n. {x ∈ (T^^n)--`A. local_time A n x < p})"
have K_meas [measurable]: "K p n ∈ sets M" for n p
unfolding K_def by measurable
show ?case
proof (rule tendsto_zero_ennreal)
fix e :: real assume "0 < e"
define e2 where "e2 = e/3"
have "e2 > 0" using e2_def ‹e>0› by simp
(* By Poincare recurrence, the measures of the return partition pieces sum
to the measure of A; pick N so that the first N pieces capture all of A
up to e2. *)
have "(∑n. emeasure M (return_partition A (n+1))) = emeasure M ((⋃n. return_partition A (n + 1)))"
apply (rule suminf_emeasure) using return_partition_basics[OF A_meas] by auto
also have "... = emeasure M (recurrent_subset A)"
using return_partition_basics(3)[OF A_meas] by simp
also have "... = emeasure M A"
by (metis A_meas double_diff emeasure_Diff_null_set order_refl Poincare_recurrence_thm(1)[OF A_meas] recurrent_subset_incl(1))
finally have "(∑n. emeasure M (return_partition A (n+1))) = emeasure M A" by simp
moreover have "summable (λn. emeasure M (return_partition A (n+1)))"
by simp
ultimately have "(λN. (∑n<N. emeasure M (return_partition A (n+1)))) ⇢ emeasure M A"
unfolding sums_def[symmetric] sums_iff by simp
then have "(λN. (∑n<N. emeasure M (return_partition A (n+1))) + e2) ⇢ emeasure M A + e2"
by (intro tendsto_add) auto
moreover have "emeasure M A < emeasure M A + e2"
using ‹emeasure M A < ∞› ‹0 < e2› by auto
ultimately have "eventually (λN. (∑n<N. emeasure M (return_partition A (n+1))) + e2 > emeasure M A) sequentially"
by (simp add: order_tendsto_iff)
then obtain N where "N>0" and largeM: "(∑n<N. emeasure M (return_partition A (n+1))) + e2 > emeasure M A"
by (metis (no_types, lifting) add.commute add_Suc_right eventually_at_top_linorder le_add2 zero_less_Suc)
(* Main estimate: decompose T^-n A according to the last return to A before
time n, and bound the measure of the bad set at level Suc k by e2 plus
the bad sets at level k at earlier times. *)
have upper: "emeasure M (K (Suc k) n) ≤ e2 + (∑i<N. emeasure M (K k (n-i-1)))" if "n>N" for n
proof -
(* B i: points whose orbit at time n-i-1 is in the piece with return
time i+1, i.e. the last visit to A before time n is at time n-i-1 *)
define B where "B = (λi. (T^^(n-i-1))--`(return_partition A (i+1)))"
have B_meas [measurable]: "B i ∈ sets M" for i unfolding B_def by measurable
(* distinct values of i give disjoint sets, as the return times differ *)
have disj_B: "disjoint_family_on B {..<N}"
proof -
have "B i ∩ B j = {}" if "i∈{..<N}" "j∈{..<N}" "i < j" for i j
proof -
have "n > i" "n>j" using ‹n>N› that by auto
let ?k = "j-i"
have "x ∉ B i" if "x ∈ B j" for x
proof -
have "(T^^(n-j-1)) x ∈ return_partition A (j+1)" using B_def that by auto
moreover have "?k>0" using ‹i < j› by simp
moreover have "?k < j+1" by simp
ultimately have "(T^^(n-j-1)) x ∉ (T^^?k)--`A" using return_partition_def by auto
then have "x ∉ (T^^(n-j-1))--` (T^^?k)--`A" by auto
then have "x ∉ (T^^(n-j-1 + ?k))--`A" using T_vrestr_composed[OF A_meas] by simp
then have "x ∉ (T^^(n-i-1))--`A" using ‹i<j› ‹n>j› by auto
then have "x ∉ (T^^(n-i-1))--` (return_partition A (i+1))" using return_partition_def by auto
then show "x ∉ B i" using B_def by auto
qed
then show "B i ∩ B j = {}" by auto
qed
then have "⋀i j. i∈{..<N} ⟹ j∈{..<N} ⟹ i ≠ j ⟹ B i ∩ B j = {}"
by (metis Int_commute linorder_neqE_nat)
then show ?thesis unfolding disjoint_family_on_def by auto
qed
have incl_B: "B i ⊆ (T^^n)--`A" if "i ∈ {..<N}" for i
proof -
have "n > i" using ‹n>N› that by auto
have "B i ⊆ (T^^(n-i-1))--` (T^^(i+1))--` A"
using B_def return_partition_def by auto
then show "B i ⊆ (T^^n)--`A"
using T_vrestr_composed(1)[OF A_meas, of "n-i-1", of "i+1"] ‹n>i› by auto
qed
(* R: the remainder of T^-n A not covered by the B i; its measure is
mu(A) minus the measures of the first N partition pieces, hence < e2 *)
define R where "R = (T^^n)--`A - (⋃i ∈ {..<N}. B i)"
have [measurable]: "R ∈ sets M" unfolding R_def by measurable
have dec_n: "(T^^n)--`A = R ∪ (⋃i ∈ {..<N}. B i)" using R_def incl_B by blast
have small_R: "emeasure M R < e2"
proof -
have "R ∩ (⋃i ∈ {..<N}. B i) = {}" using R_def by blast
then have "emeasure M ((T^^n)--`A) = emeasure M R + emeasure M (⋃i ∈ {..<N}. B i)"
using plus_emeasure[of R, of M, of "⋃i ∈ {..<N}. B i"] dec_n by auto
moreover have "emeasure M (⋃i ∈ {..<N}. B i) = (∑i ∈ {..<N}. emeasure M (B i))"
by (intro disj_B sum_emeasure[symmetric], auto)
ultimately have "emeasure M ((T^^n)--`A) = emeasure M R + (∑i ∈ {..<N}. emeasure M (B i))"
by simp
moreover have "emeasure M ((T^^n)--`A) = emeasure M A"
using T_vrestr_same_emeasure(2)[OF A_meas] by simp
moreover have "⋀i. emeasure M (B i) = emeasure M (return_partition A (i+1))"
using T_vrestr_same_emeasure(2) B_def return_partition_basics(1)[OF A_meas] by simp
ultimately have a: "emeasure M A = emeasure M R + (∑i ∈ {..<N}. emeasure M (return_partition A (i+1)))"
by simp
moreover have b: "(∑i ∈ {..<N}. emeasure M (return_partition A (i+1))) ≠ ∞" using fin
by (simp add: a less_top)
ultimately show ?thesis
using largeM fin b by simp
qed
(* a bad point at level Suc k that lies in B i was bad at level k at time
n-i-1, since the visit at time n-i-1 contributes one unit of local time *)
have "K (Suc k) n ⊆ R ∪ (⋃i<N. K k (n-i-1))"
proof
fix x assume a: "x ∈ K (Suc k) n"
show "x ∈ R ∪ (⋃i<N. K k (n-i-1))"
proof (cases)
assume "¬(x ∈ R)"
have "x ∈ (T^^n)--`A" using a K_def by simp
then have "x∈ (⋃i ∈ {..<N}. B i)" using dec_n ‹¬(x ∈ R)› by simp
then obtain i where "i∈{..<N}" "x ∈ B i" by auto
then have "n>i" using ‹n>N› by auto
then have "(T^^(n-i-1)) x ∈ return_partition A (i+1)" using B_def ‹x ∈ B i› by auto
then have i: "(T^^(n-i-1)) x ∈ A" using return_partition_def by auto
then have "indicator A ((T^^(n-i-1)) x) = (1::nat)" by auto
then have "local_time A (n-i) x = local_time A (n-i-1) x + 1"
by (metis Suc_diff_Suc Suc_eq_plus1 diff_diff_add local_time_Suc[of A, of "n-i-1"] ‹n>i›)
then have "local_time A (n-i) x > local_time A (n-i-1) x" by simp
moreover have "local_time A n x ≥ local_time A (n-i) x" using local_time_incseq
by (metis ‹i < n› le_add_diff_inverse2 less_or_eq_imp_le local_time_cocycle le_iff_add)
ultimately have "local_time A n x > local_time A (n-i-1) x" by simp
moreover have "local_time A n x < Suc k" using a K_def by simp
ultimately have *: "local_time A (n-i-1) x < k" by simp
have "x ∈ space M" using ‹x ∈ (T^^n)--`A› by auto
then have "x ∈ (T^^(n-i-1))--`A" using i A_meas vimage_restr_def by (metis IntI sets.Int_space_eq2 vimageI)
then have "x ∈ K k (n-i-1)" using * K_def by blast
then show ?thesis using ‹i∈{..<N}› by auto
qed (simp)
qed
then have "emeasure M (K (Suc k) n) ≤ emeasure M (R ∪ (⋃i<N. K k (n-i-1)))"
by (intro emeasure_mono, auto)
also have "... ≤ emeasure M R + emeasure M (⋃i<N. K k (n-i-1))"
by (rule emeasure_subadditive, auto)
also have "... ≤ emeasure M R + (∑i<N. emeasure M (K k (n-i-1)))"
by (metis add_left_mono image_subset_iff emeasure_subadditive_finite[where ?A = "λi. K k (n-i-1)" and ?I = "{..<N}", OF finite_lessThan[of N]] K_meas)
also have "... ≤ e2 + (∑i<N. emeasure M (K k (n-i-1)))"
using small_R by (auto intro!: add_right_mono)
finally show "emeasure M (K (Suc k) n) ≤ e2 + (∑i<N. emeasure M (K k (n-i-1)))" .
qed
(* by the induction hypothesis, the finite sum over i tends to 0, so it is
eventually below e2; combine with the main estimate *)
have "(λn. (∑i∈{..<N}. emeasure M (K k (n-i-1)))) ⇢ (∑i∈{..<N}. 0)"
apply (intro tendsto_intros seq_offset_neg) using Suc.IH K_def by simp
then have "eventually (λn. (∑i∈{..<N}. emeasure M (K k (n-i-1))) < e2) sequentially"
using ‹e2 > 0› by (simp add: order_tendsto_iff)
then obtain N2 where N2bound: "⋀n. n > N2 ⟹ (∑i∈{..<N}. emeasure M (K k (n-i-1))) < e2"
by (meson eventually_at_top_dense)
define N3 where "N3 = max N N2"
have "emeasure M (K (Suc k) n) < e" if "n > N3" for n
proof -
have "n>N2" "n > N" using N3_def that by auto
then have "emeasure M (K (Suc k) n) ≤ ennreal e2 + (∑i∈{..<N}. emeasure M (K k (n-i-1)))"
using upper by simp
also have "... ≤ ennreal e2 + ennreal e2"
using N2bound[OF ‹n > N2›] less_imp_le by auto
also have "... < e" using e2_def ‹e > 0›
by (auto simp add: ennreal_plus[symmetric] simp del: ennreal_plus intro!: ennreal_lessI)
ultimately show "emeasure M (K (Suc k) n) < e" using le_less_trans by blast
qed
then show "∀⇩F x in sequentially. emeasure M {xa ∈ (T ^^ x) --` A. local_time A x xa < Suc k} < ennreal e"
unfolding K_def by (auto simp: eventually_at_top_dense intro!: exI[of _ N3])
qed
qed
text ‹We deduce that local times to a set $B$ also tend to infinity on $T^{-n}A$ if $B$ is related
to $A$, i.e., if points in $A$ have some iterate in $B$.
This is clearly a necessary condition for the lemmas to hold: otherwise, points of $A$
that never visit $B$ have a local time in $B$ equal to $0$, and so do all their preimages.
The lemmas are readily reduced to the previous one on the local time to $A$, since if one visits
$A$ then one visits $B$ in finite time by assumption (uniformly bounded in the first lemma,
uniformly bounded on a set of large measure in the second lemma).›
lemma (in conservative_mpt) local_time_unbounded2:
assumes A_meas [measurable]: "A ∈ sets M"
and fin: "emeasure M A < ∞"
and incl: "A ⊆ (T^^i)--`B"
shows "(λn. emeasure M {x ∈ (T^^n)--`A. local_time B n x < k}) ⇢ 0"
proof -
(* Reduce to local_time_unbounded1: each visit to A at time t produces a visit
to B at time t+i, so the local time in A is at most the local time in B
plus i; then a small local time in B forces a small local time in A. *)
have "emeasure M {x ∈ (T^^n)--`A. local_time B n x < k} ≤ emeasure M {x ∈ (T^^n)--`A. local_time A n x < k + i}"
if "n > i" for n
proof -
have "local_time A n x ≤ local_time B n x + i" for x
proof -
have "local_time B n x ≥ local_time A (n-i) x"
proof -
(* KA: visit times to A before n-i; KB: visit times to B before n;
t ↦ t+i maps KA injectively into KB *)
define KA where "KA = {t ∈ {0..<n-i}. (T^^t) x ∈ A}"
define KB where "KB = {t ∈ {0..<n}. (T^^t) x ∈ B}"
then have "KB ⊆ {0..<n}" by auto
then have "finite KB" using finite_lessThan[of n] finite_subset by auto
let ?g = "λt. t + i"
have "⋀t. t ∈ KA ⟹ ?g t ∈ KB"
proof -
fix t assume "t ∈ KA"
then have "(T^^t) x ∈ A" using KA_def by simp
then have "(T^^i) ((T^^t) x) ∈ B" using incl by auto
then have "(T^^(t+i)) x ∈ B" by (simp add: funpow_add add.commute)
moreover have "t+i < n" using ‹t ∈ KA› KA_def ‹n > i› by auto
ultimately show "?g t ∈ KB" unfolding KB_def by simp
qed
then have "?g`KA ⊆ KB" by auto
moreover have "inj_on ?g KA" by simp
ultimately have "card KB ≥ card KA"
using card_inj_on_le[where ?f = "?g" and ?A = KA and ?B = KB] ‹finite KB› by simp
then show ?thesis using KA_def KB_def local_time_def by simp
qed
(* the remaining i steps contribute at most i visits to A *)
moreover have "i ≥ local_time A i ((T^^(n-i))x)" using local_time_bound by auto
ultimately show "local_time B n x + i ≥ local_time A n x"
using local_time_cocycle[where ?n = "n-i" and ?m = i and ?x = x and ?A = A] ‹n>i› by auto
qed
then have "local_time B n x < k ⟹ local_time A n x < k + i" for x
by (meson add_le_cancel_right le_trans not_less)
then show ?thesis
by (intro emeasure_mono, auto)
qed
then have "eventually (λn. emeasure M {x ∈ (T^^n)--`A. local_time B n x < k}
≤ emeasure M {x ∈ (T^^n)--`A. local_time A n x < k + i}) sequentially"
using eventually_at_top_dense by blast
(* sandwich between 0 and a sequence tending to 0 by local_time_unbounded1 *)
from tendsto_sandwich[OF _ this tendsto_const local_time_unbounded1[OF A_meas fin, of "k+i"]]
show ?thesis by auto
qed
lemma (in conservative_mpt) local_time_unbounded3:
assumes A_meas[measurable]: "A ∈ sets M"
and B_meas[measurable]: "B ∈ sets M"
and fin: "emeasure M A < ∞"
and incl: "A - (⋃i. (T^^i)--`B) ∈ null_sets M"
shows "(λn. emeasure M {x ∈ (T^^n)--`A. local_time B n x < k}) ⇢ 0"
proof -
(* R: the null part of A that never reaches B; A2: the good part of A *)
define R where "R = A - (⋃i. (T^^i)--`B)"
have R_meas[measurable]: "R ∈ sets M"
by (simp add: A_meas B_meas T_vrestr_meas(2)[OF B_meas] R_def countable_Un_Int(1) sets.Diff)
have "emeasure M R = 0" using incl R_def by auto
define A2 where "A2 = A - R"
have A2_meas [measurable]: "A2 ∈ sets M" unfolding A2_def by auto
have meq: "emeasure M A2 = emeasure M A" using ‹emeasure M R = 0›
unfolding A2_def by (subst emeasure_Diff) (auto simp: R_def)
then have A2_fin: "emeasure M A2 < ∞" using fin by auto
(* K N: points of A2 that reach B within time N; these increase to A2 *)
define K where "K = (λN. A2 ∩ (⋃i<N. (T^^i)--`B))"
have K_meas [measurable]: "K N ∈ sets M" for N unfolding K_def by auto
have K_incl: "⋀N. K N ⊆ A" using K_def A2_def by blast
have "(⋃N. K N) = A2" using A2_def R_def K_def by blast
moreover have "incseq K" unfolding K_def incseq_def by fastforce
ultimately have "(λN. emeasure M (K N)) ⇢ emeasure M A2" by (auto intro: Lim_emeasure_incseq)
then have conv: "(λN. emeasure M (K N)) ⇢ emeasure M A" using meq by simp
(* Bad U n: points of T^-n U whose local time in B before n is below k *)
define Bad where "Bad = (λU n. {x ∈ (T^^n)--`U. local_time B n x < k})"
define Bad0 where "Bad0 = (λn. {x ∈ space M. local_time B n x < k})"
have Bad0_meas [measurable]: "Bad0 n ∈ sets M" for n unfolding Bad0_def by auto
have Bad_inter: "⋀U n. Bad U n = (T^^n)--`U ∩ Bad0 n" unfolding Bad_def Bad0_def by auto
have Bad_meas [measurable]: "⋀U n. U ∈ sets M ⟹ Bad U n ∈ sets M" unfolding Bad_def by auto
show ?thesis
proof (rule tendsto_zero_ennreal)
fix e::real
assume "e > 0"
define e2 where "e2 = e/3"
then have "e2 > 0" using ‹e>0› by simp
then have "ennreal e2 > 0" by simp
(* choose N so that K N captures all of A up to measure e2 *)
have "(λN. emeasure M (K N) + e2) ⇢ emeasure M A + e2"
using conv by (intro tendsto_add) auto
moreover have "emeasure M A < emeasure M A + e2" using fin ‹e2 > 0› by simp
ultimately have "eventually (λN. emeasure M (K N) + e2 > emeasure M A) sequentially"
by (simp add: order_tendsto_iff)
then obtain N where "N>0" and largeK: "emeasure M (K N) + e2 > emeasure M A"
by (metis (no_types, lifting) add.commute add_Suc_right eventually_at_top_linorder le_add2 zero_less_Suc)
(* S: the small remainder of A not in K N *)
define S where "S = A - (K N)"
have S_meas [measurable]: "S ∈ sets M" using A_meas K_meas S_def by simp
have "emeasure M A = emeasure M (K N) + emeasure M S"
by (metis Diff_disjoint Diff_partition plus_emeasure[OF K_meas[of N], OF S_meas] S_def K_incl[of N])
then have S_small: "emeasure M S < e2" using largeK fin by simp
have A_incl: "A ⊆ S ∪ (⋃i<N. A2 ∩ (T^^i)--`B)" using S_def K_def by auto
(* L i: points of A2 reaching B exactly through (T^i)^-1 B; each L i
satisfies the hypotheses of local_time_unbounded2 *)
define L where "L = (λi. A2 ∩ (T^^i)--`B)"
have L_meas [measurable]: "L i ∈ sets M" for i unfolding L_def by auto
have "⋀i. L i ⊆ A2" using L_def by simp
then have L_fin: "emeasure M (L i) < ∞" for i
using emeasure_mono[of "L i" A2 M] A2_meas A2_fin by simp
have "⋀i. L i ⊆ (T^^i)--`B" using L_def by auto
then have a: "⋀i. (λn. emeasure M (Bad (L i) n)) ⇢ 0" unfolding Bad_def
using local_time_unbounded2[OF L_meas, OF L_fin] by blast
have "(λn. (∑i<N. emeasure M (Bad (L i) n))) ⇢ 0" using tendsto_sum[OF a] by auto
then have "eventually (λn. (∑i<N. emeasure M (Bad (L i) n)) < e2) sequentially"
using ‹ennreal e2 > 0› order_tendsto_iff by metis
then obtain N2 where *: "⋀n. n > N2 ⟹ (∑i<N. emeasure M (Bad (L i) n)) < e2"
by (auto simp add: eventually_at_top_dense)
(* combine: bad points of A come either from S (small measure, preserved by
the pullbacks) or from one of the finitely many L i (eventually small) *)
have "emeasure M (Bad A n) < e" if "n > N2" for n
proof -
have "emeasure M (Bad S n) ≤ emeasure M ((T^^n)--`S)"
apply (rule emeasure_mono) unfolding Bad_def by auto
also have "... = emeasure M S" using T_vrestr_same_emeasure(2) by simp
also have "... ≤ e2" using S_small by simp
finally have SBad_small: "emeasure M (Bad S n) ≤ e2" by simp
have "(T^^n)--`A ⊆ (T^^n)--`S ∪ (⋃i<N. (T^^n)--`(L i))"
using A_incl unfolding L_def by fastforce
then have I: "Bad A n ⊆ Bad S n ∪ (⋃i<N. Bad (L i) n)" using Bad_inter by force
have "emeasure M (Bad A n) ≤ emeasure M (Bad S n ∪ (⋃i<N. Bad (L i) n))"
by (rule emeasure_mono[OF I], measurable)
also have "... ≤ emeasure M (Bad S n) + emeasure M (⋃i<N. Bad (L i) n)"
by (intro emeasure_subadditive countable_Un_Int(1), auto)
also have "... ≤ emeasure M (Bad S n) + (∑i<N. emeasure M (Bad (L i) n))"
by (simp add: add_left_mono image_subset_iff Bad_meas[OF L_meas]
emeasure_subadditive_finite[OF finite_lessThan[of N], where ?A = "λi. Bad (L i) n"])
also have "... ≤ ennreal e2 + ennreal e2"
using SBad_small less_imp_le[OF *[OF ‹n > N2›]] by (rule add_mono)
also have "... < e" using e2_def ‹e>0› by (simp del: ennreal_plus add: ennreal_plus[symmetric] ennreal_lessI)
finally show "emeasure M (Bad A n) < e" by simp
qed
then show "∀⇩F x in sequentially. emeasure M {xa ∈ (T ^^ x) --` A. local_time B x xa < k} < e"
unfolding eventually_at_top_dense Bad_def by auto
qed
qed
subsection ‹The induced map›
text ‹The map induced by $T$ on a set $A$ is obtained by iterating $T$ until one lands again
in $A$. (Outside of $A$, we take the identity for definiteness.) It has very nice properties:
if $T$ is conservative, then the induced map $T_A$ also is. If $T$ is measure preserving, then
so is $T_A$. (In particular, even if $T$ preserves an infinite measure, $T_A$ is a probability
preserving map if $A$ has measure $1$ -- this makes it possible to prove some statements in
infinite measure by using results in finite measure systems). If $T$ is invertible, then so is
$T_A$. We prove all these properties in this paragraph.›
(* The map induced by T on A: iterate T for return_time_function A x steps,
   i.e., until the orbit first comes back to A. Since the return time is 0
   outside the recurrent subset of A, the induced map is the identity there
   (in particular everywhere outside A). *)
definition induced_map::"'a set ⇒ ('a ⇒ 'a)"
where "induced_map A = (λ x. (T^^(return_time_function A x)) x)"
text ‹The set $A$ is stabilized by the induced map.›
(* Membership in A is preserved in both directions by the induced map:
   points of A are sent back into A (by definition of the return time),
   and points outside A are fixed by the induced map, hence stay outside A.
   The proof uses equiv_neg from the preamble: prove the implication and
   the implication between the negations. *)
lemma induced_map_stabilizes_A:
"x ∈ A ⟷ induced_map A x ∈ A"
proof (rule equiv_neg)
fix x assume "x ∈ A"
show "induced_map A x ∈ A"
proof (cases)
(* Case 1: x never returns to A, so its return time is 0 and T_A x = x ∈ A. *)
assume "x ∉ recurrent_subset A"
then have "induced_map A x = x" using induced_map_def return_time_function_def by simp
then show ?thesis using ‹x ∈ A› by simp
next
(* Case 2: x does return to A. The return time is the Inf of the nonempty
   set K of positive times at which the orbit is in A; by Inf_nat_def1 this
   Inf belongs to K, which says exactly that T_A x ∈ A. *)
assume H: "¬(x ∉ recurrent_subset A)"
define K where "K = {n∈{1..}. (T^^n) x ∈ A}"
have "K ≠ {}" using H recurrent_subset_def K_def by blast
moreover have "return_time_function A x = Inf K" using return_time_function_def K_def H by simp
ultimately have "return_time_function A x ∈ K" using Inf_nat_def1 by simp
then show ?thesis unfolding induced_map_def K_def by blast
qed
next
(* Outside A the return time vanishes, so the induced map is the identity. *)
fix x assume "x ∉ A"
then have "x ∉ recurrent_subset A" using recurrent_subset_def by simp
then have "induced_map A x = x" using induced_map_def return_time_function_def by simp
then show "induced_map A x ∉ A" using ‹x ∉ A› by simp
qed
(* All iterates of the induced map keep A invariant: immediate induction on n
   from induced_map_stabilizes_A. *)
lemma induced_map_iterates_stabilize_A:
assumes "x ∈ A"
shows "((induced_map A)^^n) x ∈ A"
proof (induction n)
case 0
show ?case using ‹x ∈ A› by auto
next
case (Suc n)
have "((induced_map A)^^(Suc n)) x = (induced_map A) (((induced_map A)^^n) x)" by auto
then show ?case using Suc.IH induced_map_stabilizes_A by auto
qed
(* Measurability of the induced map: it is a measurable self-map of M,
   discharged automatically since return_time_function and the iterates of T
   are measurable. Declared [measurable] for later automation. *)
lemma induced_map_meas [measurable]:
assumes [measurable]: "A ∈ sets M"
shows "induced_map A ∈ measurable M M"
unfolding induced_map_def by auto
text ‹The iterates of the induced map are given by a power of the original map, where the power
is the Birkhoff sum (for the induced map) of the first return time. This is obvious, but useful.›
(* n-th iterate of the induced map = T to the power of the Birkhoff sum
   (along the induced dynamics) of the return time. Proof: induction on n,
   combining powers of T via funpow_add. *)
lemma induced_map_iterates:
"((induced_map A)^^n) x = (T^^(∑i < n. return_time_function A ((induced_map A ^^i) x))) x"
proof (induction n)
case 0
show ?case by auto
next
case (Suc n)
have "((induced_map A)^^(n+1)) x = induced_map A (((induced_map A)^^n) x)" by (simp add: funpow_add)
also have "... = (T^^(return_time_function A (((induced_map A)^^n) x))) (((induced_map A)^^n) x)"
using induced_map_def by auto
also have "... = (T^^(return_time_function A (((induced_map A)^^n) x))) ((T^^(∑i < n. return_time_function A ((induced_map A ^^i) x))) x)"
using Suc.IH by auto
also have "... = (T^^(return_time_function A (((induced_map A)^^n) x) + (∑i < n. return_time_function A ((induced_map A ^^i) x)))) x"
by (simp add: funpow_add)
also have "... = (T^^(∑i < Suc n. return_time_function A ((induced_map A ^^i) x))) x" by (simp add: add.commute)
finally show ?case by simp
qed
(* The set of infinitely recurrent points of A is invariant under all iterates
   of the induced map: by induced_map_iterates, such an iterate is T^^R for a
   suitable time R, and recurrent_subset_infty_returns handles returns of T. *)
lemma induced_map_stabilizes_recurrent_infty:
assumes "x ∈ recurrent_subset_infty A"
shows "((induced_map A)^^n) x ∈ recurrent_subset_infty A"
proof -
have "x ∈ A" using assms(1) recurrent_subset_incl(2) by auto
define R where "R = (∑i < n. return_time_function A ((induced_map A ^^i) x))"
have *: "((induced_map A)^^n) x = (T^^R) x" unfolding R_def by (rule induced_map_iterates)
moreover have "((induced_map A)^^n) x ∈ A"
by (rule induced_map_iterates_stabilize_A, simp add: ‹x ∈ A›)
ultimately have "(T^^R) x ∈ A" by simp
then show ?thesis using recurrent_subset_infty_returns[OF assms] * by auto
qed
text ‹If $x \in A$, then its successive returns to $A$ are exactly given by the iterations
of the induced map.›
(* Characterization of the return times of a point x ∈ A: the orbit of x under
   T is in A at time n iff n is a partial Birkhoff sum (for the induced map)
   of the return time function. The forward direction is a strong induction
   on n, peeling off the first return time m and applying the induction
   hypothesis to z = T_A y at time n - m. *)
lemma induced_map_returns:
assumes "x ∈ A"
shows "((T^^n) x ∈ A) ⟷ (∃N≤n. n = (∑i<N. return_time_function A ((induced_map A ^^ i) x)))"
proof
assume "(T^^n) x ∈ A"
have "⋀y. y ∈ A ⟹ (T^^n)y ∈ A ⟹ ∃N≤n. n = (∑i<N. return_time_function A (((induced_map A)^^i) y))" for n
proof (induction n rule: nat_less_induct)
case (1 n)
show "∃N≤n. n = (∑i<N. return_time_function A (((induced_map A)^^i) y))"
proof (cases)
(* Base case n = 0: take N = 0, the empty sum. *)
assume "n = 0"
then show ?thesis by auto
next
assume "¬(n = 0)"
then have "n > 0" by simp
then have y_rec: "y ∈ recurrent_subset A" using ‹y ∈ A› ‹(T^^n) y ∈ A› recurrent_subset_def by auto
then have *: "return_time_function A y > 0" by (metis DiffE insert_iff neq0_conv vimage_eq return_time0)
(* m is the first return time of y; since n is itself a return time, m ≤ n. *)
define m where "m = return_time_function A y"
have "m > 0" using * m_def by simp
define K where "K = {t ∈ {1..}. (T ^^ t) y ∈ A}"
have "n ∈ K" unfolding K_def using ‹n > 0› ‹(T^^n)y ∈ A› by simp
then have "n ≥ Inf K" by (simp add: cInf_lower)
moreover have "m = Inf K" unfolding m_def K_def return_time_function_def using y_rec by simp
ultimately have "n ≥ m" by simp
(* Apply the induction hypothesis to z = T_A y at the strictly smaller
   time n - m, then shift the resulting sum of return times by one index
   and prepend the first return time m. *)
define z where "z = induced_map A y"
have "z ∈ A" using ‹y ∈ A› induced_map_stabilizes_A z_def by simp
have "z = (T^^m) y" using induced_map_def y_rec z_def m_def by auto
then have "(T^^(n-m)) z = (T^^n) y" using ‹n ≥ m› funpow_add[of "n-m" m T, symmetric]
by (metis comp_apply le_add_diff_inverse2)
then have "(T^^(n-m)) z ∈ A" using ‹(T^^n) y ∈ A› by simp
moreover have "n-m < n" using ‹m > 0› ‹n > 0› by simp
ultimately obtain N0 where "N0 ≤ n-m" "n-m = (∑i<N0. return_time_function A (((induced_map A)^^i) z))"
using ‹z ∈ A› "1.IH" by blast
then have "n-m = (∑i<N0. return_time_function A (((induced_map A)^^i) (induced_map A y)))"
using z_def by auto
moreover have "⋀i. ((induced_map A)^^i) (induced_map A y) = ((induced_map A)^^(i+1)) y"
by (metis Suc_eq_plus1 comp_apply funpow_Suc_right)
ultimately have "n-m = (∑i<N0. return_time_function A (((induced_map A)^^(i+1)) y))"
by simp
then have "n-m = (∑i ∈ {1..<N0+1}. return_time_function A (((induced_map A)^^i) y))"
using sum.shift_bounds_nat_ivl[of "λi. return_time_function A (((induced_map A)^^i) y)", of 0, of 1, of N0, symmetric]
atLeast0LessThan by auto
moreover have "m = (∑i∈{0..<1}. return_time_function A (((induced_map A)^^i) y))" using m_def by simp
ultimately have "n = (∑i∈{0..<1}. return_time_function A (((induced_map A)^^i) y))
+ (∑i ∈ {1..<N0+1}. return_time_function A (((induced_map A)^^i) y))" using ‹n ≥ m› by simp
then have "n = (∑i∈{0..<N0+1}. return_time_function A (((induced_map A)^^i) y))"
using le_add2 sum.atLeastLessThan_concat by blast
moreover have "N0 + 1 ≤ n" using ‹N0 ≤ n-m› ‹n - m < n› by linarith
ultimately show ?thesis by (metis atLeast0LessThan)
qed
qed
then show "∃N≤n. n = (∑i<N. return_time_function A ((induced_map A ^^ i) x))"
using ‹x ∈ A› ‹(T^^n) x ∈ A› by simp
next
(* Converse: a partial Birkhoff sum of return times is a return time of T,
   directly by induced_map_iterates and invariance of A. *)
assume "∃N≤n. n = (∑i<N. return_time_function A ((induced_map A ^^ i) x))"
then obtain N where "n = (∑i<N. return_time_function A ((induced_map A ^^ i) x))" by blast
then have "(T^^n) x = ((induced_map A)^^N) x" using induced_map_iterates[of N, of A, of x] by simp
then show "(T^^n) x ∈ A" using ‹x ∈ A› induced_map_iterates_stabilize_A by auto
qed
text ‹If a map is conservative, then the induced map is still conservative.
Note that this statement is not true if one replaces the word "conservative" with "qmpt":
induction only works well in conservative settings.
For instance, the right translation
on $\mathbb{Z}$ is qmpt, but the induced map on $\mathbb{N}$ (again the right translation) is not,
since the measure of $\{0\}$ is nonzero, while its preimage, the empty set, has zero measure.
To prove conservativity, given a subset $B$ of $A$, there exists some time $n$ such that
$T^{-n} B \cap B$ has positive measure. But this time $n$ corresponds to some returns to $A$ for
the induced map, so $T^{-n} B \cap B$ is included in $\bigcup_m T_A^{-m} B \cap B$, hence
one of these sets must have positive measure.
The fact that the map is qmpt is then deduced from the conservativity.
›
(* If T is conservative, the map it induces on a measurable set A is
   conservative for the restricted measure. The proof establishes the three
   obligations of the conservative locale on (restrict_space M A, induced_map A):
   sigma-finiteness, quasi-measure-preservation, and the conservativity
   (recurrence) property, the last two via the auxiliary fact "imp" below. *)
proposition (in conservative) induced_map_conservative:
assumes A_meas: "A ∈ sets M"
shows "conservative (restrict_space M A) (induced_map A)"
proof
(* Sigma-finiteness of the restricted space is inherited from M. *)
have "sigma_finite_measure M" by unfold_locales
then have "sigma_finite_measure (restrict_space M A)"
using sigma_finite_measure_restrict_space assms by auto
then show "∃Aa. countable Aa ∧ Aa ⊆ sets (restrict_space M A) ∧ ⋃Aa = space (restrict_space M A)
∧ (∀a∈Aa. emeasure (restrict_space M A) a ≠ ∞)" using sigma_finite_measure_def by auto
(* Key step: every positive-measure measurable B ⊆ A comes back to itself
   under some positive iterate of the induced map. The T-return given by
   conservativity of T is, by induced_map_returns, a T_A-return, so the
   T-return set is covered by the union of the T_A-return sets; one of the
   latter must then have positive measure. *)
have imp: "⋀B. (B ∈ sets M ∧ B ⊆ A ∧ emeasure M B > 0) ⟹ (∃N>0. emeasure M (((induced_map A)^^N)-`B ∩ B) > 0)"
proof -
fix B
assume assm: "B ∈ sets M ∧ B ⊆ A ∧ emeasure M B > 0"
then have "B ⊆ A" by simp
have inc: "(⋃n∈{1..}. (T^^n)-`B ∩ B) ⊆ (⋃N∈{1..}. ((induced_map A)^^N)-` B ∩ B)"
proof
fix x assume "x ∈ (⋃n∈{1..}. (T^^n)-`B ∩ B)"
then obtain n where "n∈{1..}" and *: "x ∈ (T^^n)-`B ∩ B" by auto
then have "n > 0" by auto
have "x ∈ A" "(T^^n) x ∈ A" using * ‹B ⊆ A› by auto
then obtain N where **: "n = (∑i<N. return_time_function A ((induced_map A ^^ i) x))"
using induced_map_returns by auto
then have "((induced_map A)^^N) x = (T^^n) x" using induced_map_iterates[of N, of A, of x] by simp
then have "((induced_map A)^^N) x ∈ B" using * by simp
then have "x ∈ ((induced_map A)^^N)-` B ∩ B" using * by simp
moreover have "N > 0" using ** ‹n > 0›
by (metis leD lessThan_iff less_nat_zero_code neq0_conv sum.neutral_const sum_mono)
ultimately show "x ∈ (⋃N∈{1..}. ((induced_map A)^^N)-` B ∩ B)" by auto
qed
have B_meas [measurable]: "B ∈ sets M" and B_pos: "emeasure M B > 0" using assm by auto
obtain n where "n > 0" and pos: "emeasure M ((T^^n)-`B ∩ B) > 0"
using conservative[OF B_meas, OF B_pos] by auto
then have "n ∈ {1..}" by auto
have itB_meas: "⋀i. ((induced_map A)^^i)-` B ∩ B ∈ sets M"
using B_meas measurable_compose_n[OF induced_map_meas[OF A_meas]] by (metis Int_assoc measurable_sets sets.Int sets.Int_space_eq1)
then have "(⋃i∈{1..}. ((induced_map A)^^i)-` B ∩ B) ∈ sets M" by measurable
moreover have "(T^^n)-`B ∩ B ⊆ (⋃i∈{1..}. ((induced_map A)^^i)-` B ∩ B)" using inc ‹n ∈ {1..}› by force
ultimately have "emeasure M (⋃i∈{1..}. ((induced_map A)^^i)-` B ∩ B) > 0"
by (metis (no_types, lifting) emeasure_eq_0 zero_less_iff_neq_zero pos)
then have "emeasure M (⋃i∈{1..}. ((induced_map A)^^i)-` B ∩ B) ≠ 0" by simp
(* A countable union of null sets is null, so at least one iterate gives
   a non-null intersection. *)
have "∃i∈{1..}. emeasure M (((induced_map A)^^i)-` B ∩ B) ≠ 0"
proof (rule ccontr)
assume "¬(∃i∈{1..}. emeasure M (((induced_map A)^^i)-` B ∩ B) ≠ 0)"
then have a: "⋀i. i ∈ {1..} ⟹ ((induced_map A)^^i)-` B ∩ B ∈ null_sets M"
using itB_meas by auto
have "(⋃i∈{1..}. ((induced_map A)^^i)-` B ∩ B) ∈ null_sets M"
by (rule null_sets_UN', simp_all add: a)
then show "False" using ‹emeasure M (⋃i∈{1..}. ((induced_map A)^^i)-` B ∩ B) > 0› by auto
qed
then show "∃N>0. emeasure M (((induced_map A)^^N)-` B ∩ B) > 0"
by (simp add: Bex_def less_eq_Suc_le zero_less_iff_neq_zero)
qed
(* K: measurable subsets of A; K0: those that are moreover null. Both are
   stable under preimages by the induced map (stability of K0 uses that each
   T-preimage of a null set is null). *)
define K where "K = {B. B ∈ sets M ∧ B ⊆ A}"
have K_stable: "(induced_map A)-`B ∈ K" if "B ∈ K" for B
proof -
have B_meas: "B ∈ sets M" and "B ⊆ A" using that unfolding K_def by auto
then have a: "(induced_map A)-`B ⊆ A" using induced_map_stabilizes_A by auto
then have "(induced_map A)-`B = (induced_map A)-`B ∩ space M" using assms sets.sets_into_space by auto
then have "(induced_map A)-`B ∈ sets M" using induced_map_meas[OF assms] B_meas by (metis vrestr_meas vrestr_of_set)
then show "(induced_map A)-`B ∈ K" unfolding K_def using a by auto
qed
define K0 where "K0 = K ∩ (null_sets M)"
have K0_stable: "(induced_map A)-`B ∈ K0" if "B ∈ K0" for B
proof -
have "B ∈ K" using that unfolding K0_def by simp
then have a: "(induced_map A)-`B ⊆ A" and b: "(induced_map A)-`B ∈ sets M"
using K_stable unfolding K_def by auto
have B_meas [measurable]: "B ∈ sets M" using ‹B ∈ K› unfolding K_def by simp
have B0: "B ∈ null_sets M" using ‹B ∈ K0› unfolding K0_def by simp
have "(induced_map A)-`B ⊆ (⋃n. (T^^n)-`B)" unfolding induced_map_def by auto
then have "(induced_map A)-`B ⊆ (⋃n. (T^^n)-`B ∩ space M)"
using b sets.sets_into_space by simp blast
then have inc: "(induced_map A)-`B ⊆ (⋃n. (T^^n)--`B)" unfolding vimage_restr_def
using sets.sets_into_space [OF B_meas] by simp
have "(T^^n)--`B ∈ null_sets M" for n using B0 T_quasi_preserves_null(2)[OF B_meas] by simp
then have "(⋃n. (T^^n)--`B) ∈ null_sets M" using null_sets_UN by auto
then have "(induced_map A)-`B ∈ null_sets M" using null_sets_subset[OF _ b inc] by auto
then show "(induced_map A)-`B ∈ K0" unfolding K0_def K_def by (simp add: a b)
qed
(* For sets in K, being null in M and in the restricted space coincide. *)
have *: "D ∈ null_sets M ⟷ D ∈ null_sets (restrict_space M A)" if "D∈K" for D
using that unfolding K_def apply auto
apply (metis assms emeasure_restrict_space null_setsD1 null_setsI sets.Int_space_eq2 sets_restrict_space_iff)
by (metis assms emeasure_restrict_space null_setsD1 null_setsI sets.Int_space_eq2)
(* Quasi-measure-preservation: null sets pull back to null sets and
   conversely. The forward direction uses "imp" contrapositively: if B were
   not null, it would return with positive measure, contradicting that all
   iterated preimages intersected with B are null. *)
show "induced_map A ∈ quasi_measure_preserving (restrict_space M A) (restrict_space M A)"
unfolding quasi_measure_preserving_def
proof (auto)
have "induced_map A ∈ A → A" using induced_map_stabilizes_A by auto
then show a: "induced_map A ∈ measurable (restrict_space M A) (restrict_space M A)"
using measurable_restrict_space3[where ?A = A and ?B = A and ?M = M and ?N = M] induced_map_meas[OF A_meas] by auto
fix B assume H: "B ∈ sets (restrict_space M A)"
"induced_map A -`B ∩ space (restrict_space M A) ∈ null_sets (restrict_space M A)"
then have "B ∈ K" unfolding K_def by (metis assms mem_Collect_eq sets.Int_space_eq2 sets_restrict_space_iff)
then have B_meas [measurable]: "B ∈ sets M" and B_incl: "B ⊆ A" unfolding K_def by auto
have "induced_map A -`B ∈ K" using K_stable ‹B ∈ K› by auto
then have B2_meas: "induced_map A -`B ∈ sets M" and B2_incl: "induced_map A -`B ⊆ A"
unfolding K_def by auto
have "induced_map A -` B = induced_map A -`B ∩ space (restrict_space M A)"
using B2_incl by (simp add: Int_absorb2 assms space_restrict_space)
then have "induced_map A -` B ∈ null_sets (restrict_space M A)" using H(2) by simp
then have "induced_map A -` B ∈ K0" unfolding K0_def using ‹induced_map A -`B ∈ K› * by auto
{
fix n
have *: "((induced_map A)^^(n+1))-`B ∈ K0"
proof (induction n)
case (Suc n)
have "((induced_map A)^^(Suc n+1))-`B = (induced_map A)-`(((induced_map A)^^(n+1))-` B)"
by (metis Suc_eq_plus1 funpow_Suc_right vimage_comp)
then show ?case by (metis Suc.IH K0_stable)
qed (auto simp add: ‹induced_map A -` B ∈ K0›)
have **: "((induced_map A)^^(n+1))-` B ∈ sets M" using * K0_def K_def by auto
have "((induced_map A)^^(n+1))-` B ∩ B ∈ null_sets M"
apply (rule null_sets_subset[of "((induced_map A)^^(n+1))-`B"])
using * unfolding K0_def apply simp
using ** by auto
}
then have "((induced_map A)^^n)-` B ∩ B ∈ null_sets M" if "n>0" for n
using that by (metis Suc_eq_plus1 neq0_conv not0_implies_Suc)
then have "B ∈ null_sets M" using imp B_incl B_meas zero_less_iff_neq_zero inf.strict_order_iff
by (metis null_setsD1 null_setsI)
then show "B ∈ null_sets (restrict_space M A)" using * ‹B ∈ K› by auto
next
fix B assume H: "B ∈ sets (restrict_space M A)"
"B ∈ null_sets (restrict_space M A)"
then have "B ∈ K" unfolding K_def by (metis assms mem_Collect_eq sets.Int_space_eq2 sets_restrict_space_iff)
then have B_meas [measurable]: "B ∈ sets M" and B_incl: "B ⊆ A" unfolding K_def by auto
have "B ∈ null_sets M" using * H(2) ‹B ∈ K› by simp
then have "B ∈ K0" unfolding K0_def using ‹B ∈ K› by simp
then have inK: "(induced_map A)-`B ∈ K0" using K0_stable by auto
then have inA: "(induced_map A)-`B ⊆ A" unfolding K0_def K_def by auto
then have "(induced_map A)-`B = (induced_map A)-`B ∩ space (restrict_space M A)"
by (simp add: Int_absorb2 assms space_restrict_space2)
then show "induced_map A -` B ∩ space (restrict_space M A) ∈ null_sets (restrict_space M A)"
using * inK unfolding K0_def by auto
qed
(* Conservativity of the induced map on the restricted space: transfer the
   positive-measure return provided by "imp" through emeasure_restrict_space. *)
fix B
assume B_measA: "B ∈ sets (restrict_space M A)" and B_posA: "0 < emeasure (restrict_space M A) B"
then have B_meas: "B ∈ sets M" by (metis assms sets.Int_space_eq2 sets_restrict_space_iff)
have B_incl: "B ⊆ A" by (metis B_measA assms sets.Int_space_eq2 sets_restrict_space_iff)
then have B_pos: "0 < emeasure M B" using B_posA by (simp add: assms emeasure_restrict_space)
obtain N where "N>0" "emeasure M (((induced_map A)^^N)-`B ∩ B) > 0" using imp B_meas B_incl B_pos by auto
then have "emeasure (restrict_space M A) ((induced_map A ^^ N) -` B ∩ B) > 0"
using assms emeasure_restrict_space by (metis B_incl Int_lower2 sets.Int_space_eq2 subset_trans)
then show "∃n>0. 0 < emeasure (restrict_space M A) ((induced_map A ^^ n) -` B ∩ B)"
using ‹N > 0› by auto
qed
text ‹Now, we want to prove that, if a map is conservative and measure preserving, then
the induced map is also measure preserving. We first prove it for subsets $W$ of $A$ of finite
measure, the general case will readily follow.
The argument uses the fact that the preimage of the set of points with first entrance time $n$
is the union of the set of points with first entrance time $n+1$, and the points of $A$ with
first return $n+1$. Following the preimage of $W$ under this process, we will get the intersection
of $T_A^{-1} W$ with the different elements of the return partition, and the points in $T^{-n}W$
whose first $n-1$ iterates do not meet $A$ (and the measures of these sets add up to $\mu(W)$).
To conclude, it suffices to show that the measure of points in $T^{-n}W$
whose first $n-1$ iterates do not meet $A$ tends to $0$. This follows from our local times
estimates above.›
(* Measure preservation of the induced map on a finite-measure W ⊆ A.
   BW n: points entering A for the first time at time n whose orbit hits W at
   time n. DW n: points with return time n whose induced image lies in W.
   The recursion T--`(BW n) = BW (n+1) ∪ DW (n+1) (a disjoint union) shows
   μ(W) = Σ_{n<N} μ(DW (n+1)) + μ(BW N) for all N; letting N → ∞, the local
   time estimates make μ(BW N) → 0, and the series sums to μ(T_A--`W). *)
lemma (in conservative_mpt) induced_map_measure_preserving_aux:
assumes A_meas [measurable]: "A ∈ sets M"
and W_meas [measurable]: "W ∈ sets M"
and incl: "W ⊆ A"
and fin: "emeasure M W < ∞"
shows "emeasure M ((induced_map A)--`W) = emeasure M W"
proof -
have "W ⊆ space M" using W_meas
using sets.sets_into_space by blast
define BW where "BW = (λn. (first_entrance_set A n) ∩ (T^^n)--`W)"
define DW where "DW = (λn. (return_time_function A)-` {n} ∩ (induced_map A)--`W)"
have "⋀n. DW n = (return_time_function A)-` {n} ∩ space M ∩ (induced_map A)--`W"
using DW_def by auto
then have DW_meas [measurable]: "⋀n. DW n ∈ sets M" by auto
have disj_DW: "disjoint_family (λn. DW n)" using DW_def disjoint_family_on_def by blast
then have disj_DW2: "disjoint_family (λn. DW (n+1))" by (simp add: disjoint_family_on_def)
have "(⋃n. DW n) = DW 0 ∪ (⋃n. DW (n+1))" by (auto) (metis not0_implies_Suc)
moreover have "(DW 0) ∩ (⋃n. DW (n+1)) = {}"
by (auto) (metis IntI Suc_neq_Zero UNIV_I empty_iff disj_DW disjoint_family_on_def)
ultimately have *: "emeasure M (⋃n. DW n) = emeasure M (DW 0) + emeasure M (⋃n. DW (n+1))"
by (simp add: countable_Un_Int(1) plus_emeasure)
(* DW 0 consists of non-recurrent points of W ⊆ A, a null set by Poincaré
   recurrence, so it does not contribute to the measure. *)
have "DW 0 = (return_time_function A)-` {0} ∩ W"
unfolding DW_def induced_map_def return_time_function_def
apply (auto simp add: return_time0[of A]) using sets.sets_into_space[OF W_meas] by auto
also have "... = W - recurrent_subset A" using return_time0 by blast
also have "... ⊆ A - recurrent_subset A" using incl by blast
finally have "DW 0 ∈ null_sets M" by (metis A_meas DW_meas null_sets_subset Poincare_recurrence_thm(1))
then have "emeasure M (DW 0) = 0" by auto
have "(induced_map A)--`W = (⋃n. DW n)" using DW_def by blast
then have "emeasure M ((induced_map A)--`W) = emeasure M (⋃n. DW n)" by simp
also have "... = emeasure M (⋃n. DW (n+1))" using * ‹emeasure M (DW 0) = 0› by simp
also have "... = (∑n. emeasure M (DW (n+1)))"
apply (rule suminf_emeasure[symmetric]) using disj_DW2 by auto
finally have m: "emeasure M ((induced_map A)--`W) = (∑n. emeasure M (DW (n+1)))" by simp
moreover have "summable (λn. emeasure M (DW (n+1)))" by simp
ultimately have lim: "(λN. (∑ n∈{..<N}. emeasure M (DW (n+1)))) ⇢ emeasure M ((induced_map A)--`W)"
by (simp add: summable_LIMSEQ)
have BW_meas [measurable]: "⋀n. BW n ∈ sets M" unfolding BW_def by simp
(* Pulling BW n back under T and splitting along A gives the recursion:
   the part outside A is BW (n+1), the part inside A is DW (n+1). *)
have *: "⋀n. T--`(BW n) - A = BW (n+1)"
proof -
fix n
have "T--`(BW n) = T--`(first_entrance_set A n) ∩ (T^^(n+1))--`W"
unfolding BW_def by (simp add: assms(2) T_vrestr_composed(2))
then have "T--`(BW n) - A = (T--`(first_entrance_set A n) - A) ∩ (T^^(n+1))--`W"
by blast
then have "T--`(BW n) - A = first_entrance_set A (n+1) ∩ (T^^(n+1))--`W"
using first_entrance_rec[OF A_meas] by simp
then show "T--`(BW n) - A = BW (n+1)" using BW_def by simp
qed
have **: "DW (n+1) = T--`(BW n) ∩ A" for n
proof -
have "T--`(BW n) = T--`(first_entrance_set A n) ∩ (T^^(n+1))--`W"
unfolding BW_def by (simp add: assms(2) T_vrestr_composed(2))
then have "T--`(BW n) ∩ A = (T--`(first_entrance_set A n) ∩ A) ∩ (T^^(n+1))--`W"
by blast
then have *: "T--`(BW n) ∩ A = (return_time_function A)-`{n+1} ∩ (T^^(n+1))--`W"
using return_time_rec[OF A_meas] by simp
have "DW (n+1) = (return_time_function A)-`{n+1} ∩ (induced_map A)-`W"
using DW_def ‹W ⊆ space M› return_time_rec by auto
also have "... = (return_time_function A)-`{n+1} ∩ (T^^(n+1))-`W"
by (auto simp add: induced_map_def)
also have "... = (return_time_function A)-`{n+1} ∩ (T^^(n+1))--`W"
using ‹W ⊆ space M› return_time_rec by auto
finally show "DW (n+1) = T--`(BW n) ∩ A" using * by simp
qed
(* Induction on N, using measure preservation of T on BW n at each step. *)
have "emeasure M W = (∑ n∈{..<N}. emeasure M (DW (n+1))) + emeasure M (BW N)" for N
proof (induction N)
case 0
have "BW 0 = W" unfolding BW_def first_entrance_set_def using incl by auto
then show ?case by simp
next
case (Suc N)
have "T--`(BW N) = BW (N+1) ∪ DW (N+1)" using * ** by blast
moreover have "BW (N+1) ∩ DW (N+1) = {}" using * ** by blast
ultimately have "emeasure M (T--`(BW N)) = emeasure M (BW (N+1)) + emeasure M (DW (N+1))"
using DW_meas BW_meas plus_emeasure[of "BW (N+1)"] by simp
then have "emeasure M (BW N) = emeasure M (BW (N+1)) + emeasure M (DW (N+1))"
using T_vrestr_same_emeasure(1) BW_meas by auto
then have "(∑ n∈{..<N}. emeasure M (DW (n+1))) + emeasure M (BW N)
= (∑ n∈{..<N+1}. emeasure M (DW (n+1))) + emeasure M (BW (N+1))"
by (simp add: add.commute add.left_commute)
then show ?case using Suc.IH by simp
qed
moreover
(* BW N is contained in the set of points of (T^^N)--`W with local time < 1
   in A, whose measure tends to 0 by local_time_unbounded2. *)
have "(λN. emeasure M (BW N)) ⇢ 0"
proof (rule tendsto_sandwich[of "λ_. 0"_ _ "λN. emeasure M {x ∈ (T^^N)--`W. local_time A N x < 1}"])
have "emeasure M (BW N) ≤ emeasure M {x ∈ (T^^N)--`W. local_time A N x < 1}" for N
apply (rule emeasure_mono) unfolding BW_def local_time_def first_entrance_set_def by auto
then show "∀⇩F n in sequentially. emeasure M (BW n) ≤ emeasure M {x ∈ (T ^^ n) --` W. local_time A n x < 1}"
by auto
have i: "W ⊆ (T^^0)--`A" using incl by auto
show "(λN. emeasure M {x ∈ (T ^^ N) --` W. local_time A N x < 1}) ⇢ 0"
apply (rule local_time_unbounded2[OF _ _ i]) using fin by auto
qed (auto)
then have "(λN. (∑ n∈{..<N}. emeasure M (DW (n+1))) + emeasure M (BW N)) ⇢ emeasure M (induced_map A --` W) + 0"
using lim by (intro tendsto_add) auto
ultimately show ?thesis
by (auto intro: LIMSEQ_unique LIMSEQ_const_iff)
qed
(* Measure preservation of the induced map for an arbitrary measurable W:
   split W into WA = W ∩ A (handled by the auxiliary lemma, with an
   exhaustion argument if μ(WA) = ∞) and WB = W - A (on which the induced
   map is the identity), then add the two contributions. *)
lemma (in conservative_mpt) induced_map_measure_preserving:
assumes A_meas [measurable]: "A ∈ sets M"
and W_meas [measurable]: "W ∈ sets M"
shows "emeasure M ((induced_map A)--`W) = emeasure M W"
proof -
define WA where "WA = W ∩ A"
have WA_meas [measurable]: "WA ∈ sets M" "WA ⊆ A" using WA_def by auto
have WAi_meas [measurable]: "(induced_map A)--`WA ∈ sets M" by simp
have a: "emeasure M WA = emeasure M ((induced_map A)--`WA)"
proof (cases)
assume "emeasure M WA < ∞"
then show ?thesis using induced_map_measure_preserving_aux[OF A_meas, OF ‹WA ∈ sets M›, OF ‹WA ⊆ A›] by simp
next
(* Infinite-measure case: approximate WA from inside by finite-measure sets
   of arbitrarily large measure, so the preimage also has infinite measure. *)
assume "¬(emeasure M WA < ∞)"
then have "emeasure M WA = ∞" by (simp add: less_top[symmetric])
{
fix C::real
obtain Z where "Z ∈ sets M" "Z ⊆ WA" "emeasure M Z < ∞" "emeasure M Z > C"
by (blast intro: ‹emeasure M WA = ∞› WA_meas approx_PInf_emeasure_with_finite)
have "Z ⊆ A" using ‹Z ⊆ WA› WA_def by simp
have "C < emeasure M Z" using ‹emeasure M Z > C› by simp
also have "... = emeasure M ((induced_map A)--`Z)"
using induced_map_measure_preserving_aux[OF A_meas, OF ‹Z ∈ sets M›, OF ‹Z ⊆ A›] ‹emeasure M Z < ∞› by simp
also have "... ≤ emeasure M ((induced_map A)--`WA)"
apply(rule emeasure_mono) using ‹Z ⊆ WA› vrestr_inclusion by auto
finally have "emeasure M ((induced_map A)--`WA) > C" by simp
}
then have "emeasure M ((induced_map A)--`WA) = ∞"
by (cases "emeasure M ((induced_map A)--`WA)") auto
then show ?thesis using ‹emeasure M WA = ∞› by simp
qed
(* The induced map is the identity off A, so WB is its own preimage. *)
define WB where "WB = W - WA"
have WB_meas [measurable]: "WB ∈ sets M" unfolding WB_def by simp
have WBi_meas [measurable]: "(induced_map A)--`WB ∈ sets M" by simp
have "WB ∩ A = {}" unfolding WB_def WA_def by auto
moreover have id: "⋀x. x ∉ A ⟹ (induced_map A x) = x" unfolding induced_map_def return_time_function_def
apply (auto) using recurrent_subset_incl by auto
ultimately have "(induced_map A)--`WB = WB"
using induced_map_stabilizes_A sets.sets_into_space[OF WB_meas] apply auto
by (metis disjoint_iff_not_equal) fastforce+
then have b: "emeasure M ((induced_map A)--`WB) = emeasure M WB" by simp
(* Recombine: additivity over the disjoint decomposition W = WA ∪ WB,
   which the preimage operation preserves. *)
have "W = WA ∪ WB" "WA ∩ WB = {}" using WA_def WB_def by auto
have *: "emeasure M W = emeasure M WA + emeasure M WB"
by (subst ‹W = WA ∪ WB›, rule plus_emeasure[symmetric], auto simp add: ‹WA ∩ WB = {}›)
have W_AUB: "(induced_map A)--`W = (induced_map A)--`WA ∪ (induced_map A)--`WB"
using ‹W = WA ∪ WB› by auto
have W_AIB: "(induced_map A)--`WA ∩ (induced_map A)--`WB = {}"
by (metis ‹WA ∩ WB = {}› vrestr_empty vrestr_intersec)
have "emeasure M ((induced_map A)--`W) = emeasure M ((induced_map A)--`WA) + emeasure M ((induced_map A)--`WB)"
unfolding W_AUB by (rule plus_emeasure[symmetric]) (auto simp add: W_AIB)
then show ?thesis using a b * by simp
qed
text ‹We can now express the fact that induced maps preserve the measure.›
(* Combining conservativity and measure preservation of the induced map:
   for a conservative_mpt T, the induced system on A is again a
   conservative_mpt. Measure preservation on the restricted space is
   deduced from induced_map_measure_preserving by translating between
   emeasure on M and on restrict_space M A. *)
theorem (in conservative_mpt) induced_map_conservative_mpt:
assumes "A ∈ sets M"
shows "conservative_mpt (restrict_space M A) (induced_map A)"
unfolding conservative_mpt_def
proof
show *: "conservative (restrict_space M A) (induced_map A)" using induced_map_conservative[OF assms] by auto
show "mpt (restrict_space M A) (induced_map A)" unfolding mpt_def mpt_axioms_def
proof
show "qmpt (restrict_space M A) (induced_map A)" using * conservative_def by auto
then have meas: "(induced_map A) ∈ measurable (restrict_space M A) (restrict_space M A)"
unfolding qmpt_def qmpt_axioms_def quasi_measure_preserving_def by auto
moreover have "⋀B. B ∈ sets (restrict_space M A) ⟹
emeasure (restrict_space M A) ((induced_map A) -`B ∩ space (restrict_space M A)) = emeasure (restrict_space M A) B"
proof -
(* s, i, j: dictionary between the restricted space and M for sets ⊆ A. *)
have s: "space (restrict_space M A) = A" using assms space_restrict_space2 by auto
have i: "⋀D. D ∈ sets M ∧ D ⊆ A ⟹ emeasure (restrict_space M A) D = emeasure M D"
using assms by (simp add: emeasure_restrict_space)
have j: "⋀D. D ∈ sets (restrict_space M A) ⟷ (D ∈ sets M ∧ D ⊆ A)" using assms
by (metis sets.Int_space_eq2 sets_restrict_space_iff)
fix B
assume a: "B ∈ sets (restrict_space M A)"
then have B_meas: "B ∈ sets M" using j by auto
then have first: "emeasure (restrict_space M A) B = emeasure M B" using i j a by auto
have incl: "(induced_map A) -`B ⊆ A" using j a induced_map_stabilizes_A assms by auto
then have eq: "(induced_map A) -`B ∩ space (restrict_space M A) = (induced_map A) --`B"
unfolding vimage_restr_def s using assms sets.sets_into_space
by (metis a inf.orderE j meas measurable_sets s)
then have "emeasure M B = emeasure M ((induced_map A) -`B ∩ space (restrict_space M A))"
using induced_map_measure_preserving a j assms by auto
also have "... = emeasure (restrict_space M A) ((induced_map A) -`B ∩ space (restrict_space M A))"
using incl eq B_meas induced_map_meas[OF assms] assms i j
by (metis emeasure_restrict_space inf.orderE s space_restrict_space)
finally show "emeasure (restrict_space M A) ((induced_map A) -`B ∩ space (restrict_space M A))
= emeasure (restrict_space M A) B"
using first by auto
qed
ultimately show "induced_map A ∈ measure_preserving (restrict_space M A) (restrict_space M A)"
unfolding measure_preserving_def by auto
qed
qed
(* In a finite measure preserving system, the induced system on A is again a
   finite measure preserving system: conservativity/measure preservation come
   from induced_map_conservative_mpt, finiteness from restricting a finite
   measure. *)
theorem (in fmpt) induced_map_fmpt:
assumes "A ∈ sets M"
shows "fmpt (restrict_space M A) (induced_map A)"
unfolding fmpt_def
proof -
have "conservative_mpt (restrict_space M A) (induced_map A)" using induced_map_conservative_mpt[OF assms] by simp
then have "mpt (restrict_space M A) (induced_map A)" using conservative_mpt_def by auto
moreover have "finite_measure (restrict_space M A)" by (simp add: assms finite_measureI finite_measure_restrict_space)
ultimately show "mpt (restrict_space M A) (induced_map A) ∧ finite_measure (restrict_space M A)" by simp
qed
text ‹It will be useful to reformulate the fact that the recurrent subset has full measure
in terms of the induced measure, to simplify the use of the induced map later on.›
(* Almost every point of the restricted space is (infinitely) recurrent:
   the complements A - recurrent_subset A and A - recurrent_subset_infty A
   are null in M by Poincaré recurrence, hence null in restrict_space M A. *)
lemma (in conservative) induced_map_recurrent_typical:
assumes A_meas [measurable]: "A ∈ sets M"
shows "AE z in (restrict_space M A). z ∈ recurrent_subset A"
"AE z in (restrict_space M A). z ∈ recurrent_subset_infty A"
proof -
have "recurrent_subset A ∈ sets M" using recurrent_subset_meas[OF A_meas] by auto
then have rsA: "recurrent_subset A ∈ sets (restrict_space M A)"
using recurrent_subset_incl(1)[of A]
by (metis (no_types, lifting) A_meas sets_restrict_space_iff space_restrict_space space_restrict_space2)
have "emeasure (restrict_space M A) (space (restrict_space M A) - recurrent_subset A) = emeasure (restrict_space M A) (A - recurrent_subset A)"
by (metis (no_types, lifting) A_meas space_restrict_space2)
also have "... = emeasure M (A - recurrent_subset A)"
by (simp add: emeasure_restrict_space)
also have "... = 0" using Poincare_recurrence_thm[OF A_meas] by auto
finally have "space (restrict_space M A) - recurrent_subset A ∈ null_sets (restrict_space M A)"
using rsA by blast
then show "AE z in (restrict_space M A). z ∈ recurrent_subset A"
by (metis (no_types, lifting) DiffI eventually_ae_filter mem_Collect_eq subsetI)
(* Same argument, with the infinitely recurrent subset. *)
have "recurrent_subset_infty A ∈ sets M" using recurrent_subset_meas[OF A_meas] by auto
then have rsiA: "recurrent_subset_infty A ∈ sets (restrict_space M A)"
using recurrent_subset_incl(2)[of A]
by (metis (no_types, lifting) A_meas sets_restrict_space_iff space_restrict_space space_restrict_space2)
have "emeasure (restrict_space M A) (space (restrict_space M A) - recurrent_subset_infty A) = emeasure (restrict_space M A) (A - recurrent_subset_infty A)"
by (metis (no_types, lifting) A_meas space_restrict_space2)
also have "... = emeasure M (A - recurrent_subset_infty A)"
apply (rule emeasure_restrict_space) using A_meas by auto
also have "... = 0" using Poincare_recurrence_thm[OF A_meas] by auto
finally have "space (restrict_space M A) - recurrent_subset_infty A ∈ null_sets (restrict_space M A)"
using rsiA by blast
then show "AE z in (restrict_space M A). z ∈ recurrent_subset_infty A"
by (metis (no_types, lifting) DiffI eventually_ae_filter mem_Collect_eq subsetI)
qed
subsection ‹Kac's theorem, and variants›
text ‹Kac's theorem states that, for conservative maps, the integral of the return time
to a subset $A$ is equal to the measure of the space if the dynamics is ergodic, or of the
space seen by $A$ in the general case.
This result generalizes from the return time to any induced function, a notion that we define now.›
(* The induced function of f on A at x: the sum of f along the orbit of x up to
   (but excluding) the first return time to A.  Since the sum is over
   {..< return_time_function A x}, it is the empty sum (i.e. 0) whenever the
   return time is 0, in particular outside the recurrent subset of A. *)
definition induced_function::"'a set ⇒ ('a ⇒ 'b::comm_monoid_add) ⇒ ('a ⇒ 'b)"
where "induced_function A f = (λx. (∑i∈{..< return_time_function A x}. f((T^^i) x)))"
text ‹By definition, the induced function is supported on the recurrent subset of $A$.›
(* The induced function vanishes where the return time is 0, so multiplying by
   the indicator of {return time ≥ 1} changes nothing. *)
lemma induced_function_support:
fixes f::"'a ⇒ ennreal"
shows "induced_function A f y = induced_function A f y * indicator ((return_time_function A)-`{1..}) y"
by (auto simp add: induced_function_def indicator_def not_less_eq_eq)
text ‹Basic measurability statements.›
(* Measurability of the induced function, ennreal version: a finite sum of
   measurable functions, where the number of terms (the return time) is itself
   a measurable function. *)
lemma induced_function_meas_ennreal [measurable]:
fixes f::"'a ⇒ ennreal"
assumes [measurable]: "f ∈ borel_measurable M" "A ∈ sets M"
shows "induced_function A f ∈ borel_measurable M"
unfolding induced_function_def by simp
(* Measurability of the induced function, real-valued version.  Same proof as
   the ennreal version above. *)
lemma induced_function_meas_real [measurable]:
fixes f::"'a ⇒ real"
assumes [measurable]: "f ∈ borel_measurable M" "A ∈ sets M"
shows "induced_function A f ∈ borel_measurable M"
unfolding induced_function_def by simp
text ‹The Birkhoff sums of the induced function for the induced map form a subsequence of the original
Birkhoff sums for the original map, corresponding to the return times to $A$.›
(* The Birkhoff sum of f for T, evaluated at the n-th return time (which is the
   Birkhoff sum of the return-time function for the induced map), coincides with
   the n-th Birkhoff sum of the induced function for the induced map.  Proved by
   induction on n, using the cocycle property of Birkhoff sums on both sides. *)
lemma (in conservative) induced_function_birkhoff_sum:
fixes f::"'a ⇒ real"
assumes "A ∈ sets M"
shows "birkhoff_sum f (qmpt.birkhoff_sum (induced_map A) (return_time_function A) n x) x
= qmpt.birkhoff_sum (induced_map A) (induced_function A f) n x"
proof -
(* Work inside the induced system on A; abbreviate the induced map TA, the
   return time phiA, and the n-th return time R n. *)
interpret A: conservative "restrict_space M A" "induced_map A" by (rule induced_map_conservative[OF assms])
define TA where "TA = induced_map A"
define phiA where "phiA = return_time_function A"
define R where "R = (λn. A.birkhoff_sum phiA n x)"
show ?thesis
proof (induction n)
case 0
show ?case using birkhoff_sum_1(1) A.birkhoff_sum_1(1) by auto
next
case (Suc n)
(* Key identities: T iterated R n times equals TA iterated n times, and the
   (n+1)-st return time adds one more return time at the current induced point. *)
have "(T^^(R n)) x = (TA^^n) x" unfolding TA_def R_def A.birkhoff_sum_def phiA_def by (rule induced_map_iterates[symmetric])
have "R(n+1) = R n + phiA ((TA^^n) x)"
unfolding R_def using A.birkhoff_sum_cocycle[where ?n = n and ?m = 1 and ?f = phiA] A.birkhoff_sum_1(2) TA_def by auto
(* Split the Birkhoff sum at time R n via the cocycle property, recognize the
   last piece as one value of the induced function, and apply the induction
   hypothesis to the first piece. *)
then have "birkhoff_sum f (R (n+1)) x = birkhoff_sum f (R n) x + birkhoff_sum f (phiA ((TA^^n) x)) ((T^^(R n)) x)"
using birkhoff_sum_cocycle[where ?n = "R n" and ?f = f] by auto
also have "... = birkhoff_sum f (R n) x + birkhoff_sum f (phiA ((TA^^n) x)) ((TA^^n) x)"
using ‹(T^^(R n)) x = (TA^^n) x› by simp
also have "... = birkhoff_sum f (R n) x + (induced_function A f) ((TA^^n) x)"
unfolding induced_function_def birkhoff_sum_def phiA_def by simp
also have "... = A.birkhoff_sum (induced_function A f) n x + (induced_function A f) ((TA^^n) x)" using Suc.IH R_def phiA_def by auto
also have "... = A.birkhoff_sum (induced_function A f) (n+1) x"
using A.birkhoff_sum_cocycle[where ?n = n and ?m = 1 and ?f = "induced_function A f" and ?x = x, symmetric]
A.birkhoff_sum_1(2)[where ?f = "induced_function A f" and ?x = "(TA^^n) x"]
unfolding TA_def by auto
finally show ?case unfolding R_def phiA_def by simp
qed
qed
text ‹The next lemma is very simple (just a change of variables to reorder
the indices in the double sum). However, the proof I give is very tedious:
infinite sums on proper subsets are not handled well, hence I use integrals
on products of discrete spaces instead, and go back and forth between the two
notions -- maybe there are better suited tools in the library, but I could
not locate them...
This is the main combinatorial tool used in the proof of Kac's Formula.›
(* Combinatorial reindexing of a double series of nonnegative extended reals:
   summing d i n over the triangle {i ≤ n} equals the row i = 0 plus the strictly
   positive rows reindexed as d (i+1) (n+1+i).  Everything is carried out as
   nn_integrals over count_space, where reindexing along a bijection and
   splitting along disjoint sets are available. *)
lemma kac_series_aux:
fixes d:: "nat ⇒ nat ⇒ ennreal"
shows "(∑n. (∑i≤n. d i n)) = (∑n. d 0 n) + (∑n. (∑i. d (i+1) (n+1+i)))"
(is "_ = ?R")
proof -
(* g maps (i, n) bijectively onto U = {(i, n). 0 < i ≤ n}, the part of the
   triangle with i > 0; its inverse is (i, n) ↦ (i-1, n-i). *)
define g where "g = (λ(i,n). (i+(1::nat), n+1+i))"
define U where "U = {(i,n). (i>(0::nat)) ∧ (n≥i)}"
have bij: "bij_betw g UNIV U"
by (rule bij_betw_byWitness[where ?f' = "λ(i, n). (i-1, n-i)"], auto simp add: g_def U_def)
define e where "e = (λ (i,j). d i j)"
have pos: "⋀x. e x ≥ 0" using e_def by auto
(* Step 1: the second summand of ?R equals the integral of e over U. *)
have "(∑n. (∑i. d (i+1) (n+1+i))) = (∑n. (∑i. e(i+1, n+1+i)))" using e_def by simp
also have "... = ∫⇧+n. ∫⇧+i. e (i+1, n+1+i) ∂count_space UNIV ∂count_space UNIV"
using pos nn_integral_count_space_nat suminf_0_le by auto
also have "... = (∫⇧+x. e (g x) ∂count_space UNIV)"
unfolding g_def using nn_integral_snd_count_space[of "λ(i,n). e(i+1, n+1+i)"]
by (auto simp add: prod.case_distrib)
also have "... = (∫⇧+y ∈ U. e y ∂count_space UNIV)"
using nn_integral_count_compose_bij[OF bij] by simp
finally have *: "(∑n. (∑i. d (i+1) (n+1+i))) = (∫⇧+y ∈ U. e y ∂count_space UNIV)"
by simp
(* Step 2: the first summand of ?R equals the integral of e over
   V = {(i, n). i = 0}, the i = 0 row of the triangle. *)
define V where "V = {((i::nat),(n::nat)). i = 0}"
have i: "e (i, n) * indicator {0} i = e (i, n) * indicator V (i, n)" for i n
by (auto simp add: indicator_def V_def)
have "d 0 n = (∫⇧+i ∈ {0}. e (i, n) ∂count_space UNIV)" for n
proof -
have "(∫⇧+i ∈ {0}. e (i, n) ∂count_space UNIV) = (∫⇧+i. e (i, n) ∂count_space {0})"
using nn_integral_count_space_indicator[of _ "λi. e(i, n)"] by simp
also have "... = e (0, n)"
using nn_integral_count_space_finite[where ?f = "λi. e (i, n)"] by simp
finally show ?thesis using e_def by simp
qed
then have "(∑n. d 0 n) = (∑n. (∫⇧+i. e (i, n) * indicator {0} i ∂count_space UNIV))"
by simp
also have "... = (∫⇧+n. (∫⇧+i. e (i, n) * indicator {0} i ∂count_space UNIV) ∂count_space UNIV)"
by (simp add: nn_integral_count_space_nat)
also have "... = (∫⇧+(i,n). e (i, n) * indicator {0} i ∂count_space UNIV)"
using nn_integral_snd_count_space[of "λ (i,n). e(i,n) * indicator {0} i"] by auto
also have "... = (∫⇧+(i,n). e (i, n) * indicator V (i,n) ∂count_space UNIV)"
by (metis i)
finally have "(∑n. d 0 n) = (∫⇧+y ∈ V. e y ∂count_space UNIV)"
by (simp add: split_def)
(* Step 3: V and U are disjoint and V ∪ U is the triangle {i ≤ n}, so ?R is the
   integral over the triangle, which is the left-hand side. *)
then have "?R = (∫⇧+y ∈ V. e y ∂count_space UNIV) + (∫⇧+y ∈ U. e y ∂count_space UNIV)"
using * by simp
also have "... = (∫⇧+y ∈ V ∪ U. e y ∂count_space UNIV)"
by (rule nn_integral_disjoint_pair_countspace[symmetric], auto simp add: U_def V_def)
also have "... = (∫⇧+(i, n). e (i, n) * indicator {..n} i ∂count_space UNIV)"
by (rule nn_integral_cong, auto simp add: indicator_def of_bool_def V_def U_def pos, meson)
also have "... = (∫⇧+n. (∫⇧+i. e (i, n) * indicator {..n} i ∂count_space UNIV)∂count_space UNIV)"
using nn_integral_snd_count_space[of "λ(i,n). e(i,n) * indicator {..n} i"] by auto
also have "... = (∑n. (∑i. e (i, n) * indicator {..n} i))"
using pos nn_integral_count_space_nat suminf_0_le by auto
(* The inner infinite sum truncates to a finite sum over {..n}. *)
moreover have "(∑i. e (i, n) * indicator {..n} i) = (∑i≤n. e (i, n))" for n
proof -
have "finite {..n}" by simp
moreover have "⋀i. i ∉ {..n} ⟹ e (i,n) * indicator {..n} i = 0" using indicator_def by simp
then have "(∑i. e (i,n) * indicator {..n} i) = (∑i ∈ {..n} . e (i, n) * indicator {..n} i)"
by (meson calculation suminf_finite)
moreover have "⋀i. i ∈ {..n} ⟹ e (i, n) * indicator {..n} i = e (i, n)" using indicator_def by auto
ultimately show "(∑i. e (i, n) * indicator {..n} i) = (∑i≤n. e (i, n))" by simp
qed
ultimately show ?thesis using e_def by simp
qed
end
context conservative_mpt begin
text ‹We prove Kac's Formula (in the general form for induced functions) first for
functions taking values in ennreal (to avoid all summability issues). The result for
real functions will follow by domination. First, we assume additionally that $f$ is bounded
and has a support of finite measure, the general case will follow readily by truncation.
The proof is again an instance of the fact that the preimage of the set of points
with first entrance time $n$ is the union of the set of points with first entrance time $n+1$,
and the points of $A$ with first return $n+1$. Keeping track of the integral of $f$ on the
different parts that appear in this argument, we will see that the integral of the induced
function on the set of points with return time at most $n$ is equal to the integral of the function,
up to an error controlled by the measure of points in $T^{-n}(supp(f))$ with local time $0$.
Local time controls ensure that this contribution vanishes asymptotically.
›
(* Kac's formula for a bounded ennreal function with support of finite measure:
   the integral of the induced function equals the integral of f over the set
   ⋃n. (T^^n)--`A of points that eventually enter A.  The proof decomposes both
   sides along first-entrance sets B n and return-time level sets D n, and
   matches the resulting double series via kac_series_aux. *)
lemma induced_function_nn_integral_aux:
fixes f::"'a ⇒ ennreal"
assumes A_meas [measurable]: "A ∈ sets M"
and f_meas [measurable]: "f ∈ borel_measurable M"
and f_bound: "⋀x. f x ≤ ennreal C" "0 ≤ C"
and f_supp: "emeasure M {x ∈ space M. f x > 0} < ∞"
shows "(∫⇧+y. induced_function A f y ∂M) = (∫⇧+ x ∈ (⋃n. (T^^n)--`A). f x ∂M)"
proof -
(* B n = points with first entrance into A at time n; these are the disjointed
   sets of the preimages (T^^i)--`A, hence pairwise disjoint with union
   ⋃n. (T^^n)--`A. *)
define B where "B = (λn. first_entrance_set A n)"
have B_meas [measurable]: "⋀n. B n ∈ sets M" by (simp add: B_def)
then have B2 [measurable]: "(⋃n. B (n+1)) ∈ sets M" by measurable
have *: "B = disjointed (λi. (T^^i)--`A)"
by (auto simp add: B_def disjointed_def first_entrance_set_def)
then have "disjoint_family B" by (simp add: disjoint_family_disjointed)
have "(⋃n. (T^^n)--`A) = (⋃n. disjointed (λi. (T^^i)--`A) n)" by (simp add: UN_disjointed_eq)
then have "(⋃n. (T^^n)--`A) = (⋃n. B n)" using * by simp
then have "(⋃n. (T^^n)--`A) = B 0 ∪ (⋃n. B (n+1))" by (auto) (metis not0_implies_Suc)
(* Decompose the right-hand side along B 0 and the disjoint family B (n+1). *)
then have "(∫⇧+ x ∈ (⋃n. (T^^n)--`A). f x ∂M) = (∫⇧+ x ∈ (B 0 ∪ (⋃n. B (n+1))). f x ∂M)" by simp
also have "... = (∫⇧+ x ∈ B 0. f x ∂M) + (∫⇧+ x ∈ (⋃n. B (n+1)). f x ∂M)"
proof (rule nn_integral_disjoint_pair)
show "B 0 ∩ (⋃n. B (n+1)) = {}"
by (auto) (metis IntI Suc_neq_Zero UNIV_I empty_iff ‹disjoint_family B› disjoint_family_on_def)
qed auto
finally have "(∫⇧+ x ∈ (⋃n. (T^^n)--`A). f x ∂M) = (∫⇧+ x ∈ B 0. f x ∂M) + (∫⇧+ x ∈ (⋃n. B (n+1)). f x ∂M)"
by simp
moreover have "(∫⇧+ x ∈ (⋃n. B (n+1)). f x ∂M) = (∑n. (∫⇧+ x ∈ B (n+1). f x ∂M))"
apply (rule nn_integral_disjoint_family) using ‹disjoint_family B› by (auto simp add: disjoint_family_on_def)
ultimately have Bdec: "(∫⇧+ x ∈ (⋃n. (T^^n)--`A). f x ∂M) = (∫⇧+ x ∈ B 0. f x ∂M) + (∑n. ∫⇧+ x ∈ B (n+1). f x ∂M)" by simp
(* D n = points with return time exactly n+1; the recursion relations * and **
   link D and B: D n = T--`(B n) ∩ A and B (n+1) = T--`(B n) - A. *)
define D where "D = (λn. (return_time_function A)-` {n+1})"
then have "disjoint_family D" by (auto simp add: disjoint_family_on_def)
have *: "⋀n. D n = T--`(B n) ∩ A"
using D_def B_def return_time_rec[OF assms(1)] by simp
then have [measurable]: "⋀n. D n ∈ sets M" by simp
have **: "⋀n. B (n+1) = T--`(B n) - A" using first_entrance_rec[OF assms(1)] B_def by simp
have pos0: "⋀i x. f((T^^i)x) ≥ 0" using assms(3) by simp
have pos:"⋀ i C x. f((T^^i)x) * indicator (C) x ≥ 0" using assms(3) by simp
have mes0 [measurable]: "⋀ i. (λx. f((T^^i)x)) ∈ borel_measurable M" by simp
then have [measurable]: "⋀ i C. C ∈ sets M ⟹ (λx. f((T^^i)x) * indicator C x) ∈ borel_measurable M" by simp
(* Decompose the left-hand side along the disjoint family D: the induced
   function is supported on ⋃n. D n (return time at least 1). *)
have "⋀y. induced_function A f y = induced_function A f y * indicator ((return_time_function A)-`{1..}) y"
by (rule induced_function_support)
moreover have "(return_time_function A)-`{(1::nat)..} = (⋃n. D n)"
by (auto simp add: D_def Suc_le_D)
ultimately have "⋀y. induced_function A f y = induced_function A f y * indicator (⋃n. D n) y" by simp
then have "(∫⇧+y. induced_function A f y ∂M) = (∫⇧+y ∈ (⋃n. D n). induced_function A f y ∂M)" by simp
also have "... = (∑n. (∫⇧+y ∈ D n. induced_function A f y ∂M))"
apply (rule nn_integral_disjoint_family)
unfolding induced_function_def by (auto simp add: pos0 sum_nonneg ‹disjoint_family D›)
finally have a: "(∫⇧+y. induced_function A f y ∂M) = (∑n. (∫⇧+y ∈ D n. induced_function A f y ∂M))"
by simp
(* d i n = integral of f ∘ T^^i over D n; on D n the induced function is the
   sum of the first n+1 iterates, giving the triangular double series. *)
define d where "d = (λi n. (∫⇧+y ∈ D n. f((T^^i)y) ∂M))"
have "(∫⇧+y ∈ D n. induced_function A f y ∂M) = (∑i∈{..n}. d i n)" for n
proof -
have "induced_function A f y * indicator (D n) y = (∑i∈{..<n+1}. f((T^^i)y) * indicator (D n) y)" for y
by (auto simp add: induced_function_def D_def indicator_def)
then have "(∫⇧+y ∈ D n. induced_function A f y ∂M) = (∑i∈{..<n+1}. (∫⇧+y ∈ D n. f((T^^i)y) ∂M))"
using pos nn_integral_sum[of "{..<n+1}", of "λi y. f((T^^i)y) * indicator (D n) y"] by simp
also have "... = (∑i∈{..n}. (∫⇧+y ∈ D n. f((T^^i)y) ∂M))"
using lessThan_Suc_atMost by auto
finally show ?thesis using d_def by simp
qed
then have induced_dec: "(∫⇧+y. induced_function A f y ∂M) = (∑n. (∑i∈{..n}. d i n))"
using a by simp
(* The integral of f over B 0 = A equals the i = 0 row ∑n. d 0 n, since A and
   its recurrent subset ⋃n. D n differ by a null set (Poincaré recurrence). *)
have "(⋃n∈{1..}. (return_time_function A)-` {n}) = UNIV - (return_time_function A)-` {0}" by auto
then have "(⋃n∈{1..}. (return_time_function A)-` {n}) = recurrent_subset A"
using return_time0 by auto
moreover have "(⋃n. (return_time_function A)-` {n+1}) = (⋃n∈{1..}. (return_time_function A)-` {n})"
by (auto simp add: Suc_le_D)
ultimately have "(⋃n. D n) = recurrent_subset A" using D_def by simp
moreover have "(∫⇧+x ∈ A. f x ∂M) = (∫⇧+x ∈ recurrent_subset A. f x ∂M)"
by (rule nn_integral_null_delta, auto simp add: Diff_mono Un_absorb2 recurrent_subset_incl(1)[of A] Poincare_recurrence_thm(1)[OF assms(1)])
moreover have "B 0 = A" using B_def first_entrance_set_def by simp
ultimately have "(∫⇧+x ∈ B 0. f x ∂M) = (∫⇧+x ∈ (⋃n. D n). f x ∂M)" by simp
also have "... = (∑n. (∫⇧+x ∈ D n. f x ∂M))"
by (rule nn_integral_disjoint_family, auto simp add: ‹disjoint_family D›)
finally have B0dec: "(∫⇧+x ∈ B 0. f x ∂M) = (∑n. d 0 n)" using d_def by simp
(* Unfolding the recursion k times: the integral of f over B n is the partial
   sum of the d-terms plus a remainder over B (n+k), pushed forward by T^^k
   using that T is measure-preserving for nn_integrals. *)
have *: "(∫⇧+x ∈ B n. f x ∂M) = (∑i<k. (∫⇧+x ∈ D(n+i). f((T^^(i+1))x) ∂M)) + (∫⇧+x ∈ B(n+k). f((T^^k)x) ∂M)" for n k
proof (induction k)
case 0
show ?case by simp
next
case (Suc k)
have "T--`(B(n+k)) = B(n+k+1) ∪ D(n+k)" using * ** by blast
have "(∫⇧+x ∈ B(n+k). f((T^^k)x) ∂M) = (∫⇧+x. (λx. f((T^^k)x) * indicator (B (n+k)) x)(T x) ∂M)"
by (rule measure_preserving_preserves_nn_integral[OF Tm], auto simp add: pos)
also have "... = (∫⇧+x. f((T^^(k+1))x) * indicator (T--`(B (n+k))) x ∂M)"
proof (rule nn_integral_cong_AE)
have "(T^^k)(T x) = (T^^(k+1))x" for x
using comp_eq_dest_lhs by (metis Suc_eq_plus1 funpow.simps(2) funpow_swap1)
moreover have "AE x in M. f((T^^k)(T x)) * indicator (B (n+k)) (T x) = f((T^^k)(T x)) * indicator (T--`(B (n+k))) x"
by (simp add: indicator_def ‹⋀n. B n ∈ sets M›)
ultimately show "AE x in M. f((T^^k)(T x)) * indicator (B (n+k)) (T x) = f((T^^(k+1))x) * indicator (T--`(B (n+k))) x"
by simp
qed
also have "... = (∫⇧+x ∈ B(n+k+1) ∪ D(n+k). f((T^^(k+1))x) ∂M)"
using ‹T--`(B(n+k)) = B(n+k+1) ∪ D(n+k)› by simp
also have "... = (∫⇧+x ∈ B(n+k+1). f((T^^(k+1))x) ∂M) + (∫⇧+x ∈ D(n+k). f((T^^(k+1))x) ∂M)"
proof (rule nn_integral_disjoint_pair[OF mes0[of "k+1"]])
show "B(n+k+1) ∩ D(n+k) = {}" using * ** by blast
qed (auto)
finally have "(∫⇧+x ∈ B(n+k). f((T^^k)x) ∂M) = (∫⇧+x ∈ B(n+k+1). f((T^^(k+1))x) ∂M) + (∫⇧+x ∈ D(n+k). f((T^^(k+1))x) ∂M)"
by simp
then show ?case by (simp add: Suc.IH add.commute add.left_commute)
qed
(* The remainder vanishes: on B (n+k) the integrand is bounded by C times the
   indicator of V k (points hitting the support W with local time 0), and
   emeasure M (V k) tends to 0 by local_time_unbounded2. *)
have a: "(λk. (∫⇧+x ∈ B(n+k). f((T^^k)x) ∂M)) ⇢ 0" for n
proof -
define W where "W = {x ∈ space M. f x > 0} ∩ (T^^n)--`A"
have "emeasure M W ≤ emeasure M {x ∈ space M. f x > 0}"
by (intro emeasure_mono, auto simp add: W_def)
then have W_fin: "emeasure M W < ∞" using f_supp by auto
have W_meas [measurable]: "W ∈ sets M" unfolding W_def by simp
have W_incl: "W ⊆ (T^^n)--`A" unfolding W_def by simp
define V where "V = (λk. {x ∈ (T^^k)--`W. local_time A k x = 0})"
have V_meas [measurable]: "V k ∈ sets M" for k unfolding V_def by simp
have a: "(∫⇧+x ∈ B(n+k). f((T^^k)x) ∂M) ≤ C * emeasure M (V k)" for k
proof -
(* Where the integrand is nonzero, x lies in B (n+k) and (T^^k) x lies in the
   support W, so the effective domain shrinks to B (n+k) ∩ (T^^k)--`W. *)
have "f((T^^k)x) * indicator (B(n+k)) x = f((T^^k)x) * indicator (B(n+k) ∩ (T^^k)--`W) x" for x
proof (cases)
assume "f((T^^k)x) * indicator (B(n+k)) x = 0"
then show ?thesis by (simp add: indicator_def)
next
assume "¬(f((T^^k)x) * indicator (B(n+k)) x = 0)"
then have H: "f((T^^k)x) * indicator (B(n+k)) x ≠ 0" by simp
then have inB: "x ∈ B(n+k)" using H using indicator_simps(2) by fastforce
then have s: "x ∈ space M" using B_meas[of "n+k"] sets.sets_into_space by blast
then have a: "(T^^k)x ∈ space M" by (metis measurable_space Tn_meas[of k])
have "f((T^^k)x) > 0" using H by (simp add: le_neq_trans)
then have *: "(T^^k)x ∈ {y ∈ space M. f y > 0}" using a by simp
have "(T^^(n+k))x ∈ A" using inB B_def first_entrance_set_def by auto
then have "(T^^n)((T^^k)x) ∈ A" by (simp add: funpow_add)
then have "(T^^k)x ∈ (T^^n)--`A" using a by auto
then have "(T^^k)x ∈ W" using * W_def by simp
then have "x ∈ (T^^k)--`W" using s a by simp
then have "x ∈ (B(n+k) ∩ (T^^k)--`W)" using inB by simp
then show ?thesis by auto
qed
then have *: "(∫⇧+x ∈ B(n+k). f((T^^k)x) ∂M) = (∫⇧+x ∈ B(n+k) ∩ (T^^k)--`W. f((T^^k)x) ∂M)"
by simp
have "B(n+k) ⊆ {x ∈ space M. local_time A k x = 0}"
unfolding local_time_def B_def first_entrance_set_def by auto
then have "B(n+k) ∩ (T^^k)--`W ⊆ V k" unfolding V_def by blast
then have "f((T^^k)x) * indicator (B(n+k) ∩ (T^^k)--`W) x ≤ ennreal C * indicator (V k) x" for x
using f_bound by (auto split: split_indicator)
then have "(∫⇧+x ∈ B(n+k) ∩ (T^^k)--`W. f((T^^k)x) ∂M) ≤ (∫⇧+x. ennreal C * indicator (V k) x ∂M)"
by (simp add: nn_integral_mono)
also have "... = ennreal C * emeasure M (V k)" by (simp add: ‹0 ≤ C› nn_integral_cmult)
finally show "(∫⇧+x ∈ B(n+k). f((T^^k)x) ∂M) ≤ C * emeasure M (V k)" using * by simp
qed
have "(λk. emeasure M (V k)) ⇢ 0" unfolding V_def
using local_time_unbounded2[OF W_meas, OF W_fin, OF W_incl, of 1] by auto
from ennreal_tendsto_cmult[OF _ this, of C]
have t0: "(λk. C * emeasure M (V k)) ⇢ 0"
by simp
from a show "(λk. (∫⇧+x ∈ B(n+k). f((T^^k)x) ∂M)) ⇢ 0"
by (intro tendsto_sandwich[OF _ _ tendsto_const t0]) auto
qed
(* Letting k → ∞ in *, the partial sums converge to the full series, so the
   integral over B n equals ∑i. d (i+1) (n+i). *)
have b: "(λk. (∑i<k.(∫⇧+x ∈ D(n+i). f((T^^(i+1))x) ∂M))) ⇢ (∑i. d (i+1) (n+i))" for n
proof -
define e where "e = (λi. d (i+1) (n+i))"
then have "(λk. (∑i<k. e i)) ⇢ (∑i. e i)"
by (intro summable_LIMSEQ) simp
then show "(λk. (∑i<k.(∫⇧+x ∈ D(n+i). f((T^^(i+1))x) ∂M))) ⇢ (∑i. d (i+1) (n+i))"
using e_def d_def by simp
qed
have "(λk. (∑i<k. (∫⇧+x ∈ D(n+i). f((T^^(i+1))x) ∂M)) + (∫⇧+x ∈ B(n+k). f((T^^k)x) ∂M))
⇢ (∑i. d (i+1) (n+i))" for n
using tendsto_add[OF b a] by simp
moreover have "(λk. (∑i<k. (∫⇧+x ∈ D(n+i). f((T^^(i+1))x) ∂M)) + (∫⇧+x ∈ B(n+k). f((T^^k)x) ∂M))
⇢ (∫⇧+x ∈ B n. f x ∂M)" for n using * by simp
ultimately have "(∫⇧+x ∈ B n. f x ∂M) = (∑i. d (i+1) (n+i))" for n using LIMSEQ_unique by blast
then have "(∑n. (∫⇧+x ∈ B (n+1). f x ∂M)) = (∑n. (∑i. d (i+1) (n+1+i)))" by simp
(* Combine both decompositions through the series identity kac_series_aux. *)
then have "(∫⇧+ x ∈ (⋃n. (T^^n)--`A). f x ∂M) = (∑n. d 0 n) + (∑n. (∑i. d (i+1) (n+1+i)))"
using Bdec B0dec by simp
then show ?thesis using induced_dec kac_series_aux by simp
qed
text ‹We remove the boundedness assumption on $f$ and the finiteness assumption on its support
by truncation (both in space and on the values of $f$).›
(* Kac's formula for an arbitrary measurable ennreal function: obtained from the
   bounded / finite-support case induced_function_nn_integral_aux by monotone
   approximation F n x = min (f x) n * indicator (Y n) x along a sigma-finite
   exhaustion Y, and two applications of monotone convergence. *)
theorem induced_function_nn_integral:
fixes f::"'a ⇒ ennreal"
assumes A_meas [measurable]: "A ∈ sets M"
and f_meas [measurable]: "f ∈ borel_measurable M"
shows "(∫⇧+y. induced_function A f y ∂M) = (∫⇧+x ∈ (⋃n. (T^^n)--`A). f x ∂M)"
proof -
(* Sigma-finiteness gives an increasing exhaustion Y of the space by sets of
   finite measure. *)
obtain Y::"nat ⇒ 'a set" where Y_meas: "⋀ n. Y n ∈ sets M" and Y_fin: "⋀n. emeasure M (Y n) ≠ ∞"
and Y_full: "(⋃n. Y n) = space M" and Y_inc: "incseq Y"
by (meson range_subsetD sigma_finite_incseq)
(* F n truncates f at level n and restricts it to Y n, so F n is bounded by n
   and supported in a set of finite measure: the auxiliary lemma applies. *)
define F where "F = (λ(n::nat) x. min (f x) n * indicator (Y n) x)"
have mes [measurable]: "⋀n. (F n) ∈ borel_measurable M" unfolding F_def using assms(2) Y_meas by measurable
then have mes_rel [measurable]: "(λx. F n x * indicator (⋃n. (T^^n)--`A) x) ∈ borel_measurable M" for n
by measurable
have bound: "⋀n x. F n x ≤ ennreal n" by (simp add: F_def indicator_def ennreal_of_nat_eq_real_of_nat)
have "⋀n. {x ∈ space M. F n x > 0} ⊆ Y n" unfolding F_def using not_le by fastforce
then have le: "emeasure M {x ∈ space M. F n x > 0} ≤ emeasure M (Y n)" for n by (metis emeasure_mono Y_meas)
have fin: "emeasure M {x ∈ space M. F n x > 0} < ∞" for n
using Y_fin[of n] le[of n] by (simp add: less_top)
have *: "(∫⇧+y. induced_function A (F n) y ∂M) = (∫⇧+ x ∈ (⋃n. (T^^n)--`A). (F n) x ∂M)" for n
by (rule induced_function_nn_integral_aux[OF A_meas mes bound _ fin]) simp
(* F n x increases in n pointwise (both the truncation level and Y n grow). *)
have inc_Fx: "⋀x. incseq (λn. F n x)" unfolding F_def incseq_def
proof (auto simp add: incseq_def)
fix x::"'a" and m n::"nat"
assume "m ≤ n"
then have "min (f x) m ≤ min (f x) n" using linear by fastforce
moreover have "(indicator (Y m) x::ennreal) ≤ (indicator (Y n) x::ennreal)" using Y_inc
apply (auto simp add: incseq_def) using ‹m ≤ n› by blast
ultimately show "min (f x) m * (indicator (Y m) x::ennreal) ≤ min (f x) n * (indicator (Y n) x::ennreal)"
by (auto split: split_indicator)
qed
(* Monotone convergence on the right-hand side. *)
then have "incseq (λn. F n x * indicator (⋃n. (T^^n)--`A) x)" for x
by (auto simp add: indicator_def incseq_def)
then have inc_rel: "incseq (λn x. F n x * indicator (⋃n. (T^^n)--`A) x)" by (auto simp add: incseq_def le_fun_def)
then have a: "(SUP n. (∫⇧+ x ∈ (⋃n. (T^^n)--`A). F n x ∂M))
= (∫⇧+ x. (SUP n. F n x * indicator (⋃n. (T^^n)--`A) x) ∂M)"
using nn_integral_monotone_convergence_SUP[OF inc_rel, OF mes_rel] by simp
(* The supremum of the truncations recovers f on the space. *)
have SUP_Fx: "(SUP n. F n x) = f x" if "x ∈ space M" for x
proof -
obtain N where "x ∈ Y N" using Y_full ‹x ∈ space M› by auto
have "(SUP n. F n x) = (SUP n. inf (f x) (of_nat n))"
proof (rule SUP_eq)
show "∃j∈UNIV. F i x ≤ inf (f x) (of_nat j)" for i
by (auto simp: F_def intro!: exI[of _ i] split: split_indicator)
show "∃i∈UNIV. inf (f x) (of_nat j) ≤ F i x" for j
using ‹x ∈ Y N› ‹incseq Y›[THEN monoD, of N "max N j"]
by (intro bexI[of _ "max N j"])
(auto simp: F_def subset_eq not_le inf_min intro: min.coboundedI2 less_imp_le split: split_indicator split_max)
qed
then show ?thesis
by (simp add: inf_SUP[symmetric] ennreal_SUP_of_nat_eq_top)
qed
then have "(SUP n. F n x * indicator (⋃n. (T^^n)--`A) x) = f x * indicator (⋃n. (T^^n)--`A) x"
if "x ∈ space M" for x
by (auto simp add: indicator_def SUP_Fx that)
then have **: "(SUP n. (∫⇧+ x ∈ (⋃n. (T^^n)--`A). F n x ∂M)) = (∫⇧+ x ∈ (⋃n. (T^^n)--`A). f x ∂M)"
by (simp add: a cong: nn_integral_cong)
(* Monotone convergence on the left-hand side: the induced functions of the F n
   increase to the induced function of f. *)
have "incseq (λn. induced_function A (F n) x)" for x
unfolding induced_function_def
using incseq_sumI2[of "{..<return_time_function A x}", of "λi n. F n ((T^^i)x)"] inc_Fx by simp
then have "incseq (λn. induced_function A (F n))" by (auto simp add: incseq_def le_fun_def)
then have b: "(SUP n. (∫⇧+ x. induced_function A (F n) x ∂M)) = (∫⇧+ x. (SUP n. induced_function A (F n) x) ∂M)"
by (rule nn_integral_monotone_convergence_SUP[symmetric]) (measurable)
have "(SUP n. induced_function A (F n) x) = induced_function A f x" if [simp]: "x ∈ space M" for x
proof -
have "(SUP n. (∑ i ∈{..<return_time_function A x}. F n ((T^^i)x)))
= (∑ i ∈ {..<return_time_function A x}. f ((T^^i)x))"
using ennreal_SUP_sum[OF inc_Fx, where ?I = "{..<return_time_function A x}"] SUP_Fx by simp
then show "(SUP n. induced_function A (F n) x) = induced_function A f x"
by (auto simp add: induced_function_def)
qed
then have "(SUP n. (∫⇧+ x. induced_function A (F n) x ∂M)) = (∫⇧+ x. induced_function A f x ∂M)"
by (simp add: b cong: nn_integral_cong)
then show ?thesis using * ** by simp
qed
text ‹Taking the constant function equal to $1$ in the previous statement, we obtain the usual
Kac Formula.›
(* Classical Kac formula: the integral of the return time to A equals the
   measure of the points that eventually enter A.  Obtained by specializing
   induced_function_nn_integral to the constant function 1, whose induced
   function is exactly the return time. *)
theorem kac_formula_nonergodic:
assumes A_meas [measurable]: "A ∈ sets M"
shows "(∫⇧+y. return_time_function A y ∂M) = emeasure M (⋃n. (T^^n)--`A)"
proof -
define f where "f = (λ(x::'a). 1::ennreal)"
(* Induced function of the constant 1 = number of summands = return time. *)
have "⋀x. induced_function A f x = return_time_function A x"
unfolding induced_function_def f_def by (simp add:)
then have "(∫⇧+y. return_time_function A y ∂M) = (∫⇧+y. induced_function A f y ∂M)" by auto
also have "... = (∫⇧+ x ∈ (⋃n. (T^^n)--`A). f x ∂M)"
by (rule induced_function_nn_integral) (auto simp add: f_def)
also have "... = emeasure M (⋃n. (T^^n)--`A)" using f_def by auto
finally show ?thesis by simp
qed
(* In a finite measure-preserving system, Kac's formula bounds the integral of
   the return time by the (finite) total measure, so it is integrable. *)
lemma (in fmpt) return_time_integrable:
assumes A_meas [measurable]: "A ∈ sets M"
shows "integrable M (return_time_function A)"
by (rule integrableI_nonneg)
(auto simp add: kac_formula_nonergodic[OF assms] ennreal_of_nat_eq_real_of_nat[symmetric] less_top[symmetric])
text ‹Now, we want to prove the same result but for real-valued integrable function. We first
prove the statement for nonnegative functions by reducing to the nonnegative extended reals,
and then for general functions by difference.›
(* Kac's formula for a nonnegative real-valued integrable function: integrability
   of the induced function and the value of its integral.  Proved by transferring
   the ennreal statement induced_function_nn_integral through ennreal/enn2real. *)
lemma induced_function_integral_aux:
fixes f::"'a ⇒ real"
assumes A_meas [measurable]: "A ∈ sets M"
and f_int [measurable]: "integrable M f"
and f_pos: "⋀x. f x ≥ 0"
shows "integrable M (induced_function A f)"
"(∫y. induced_function A f y ∂M) = (∫x ∈ (⋃n. (T^^n)--`A). f x ∂M)"
proof -
(* Integrability: the nn_integral of the induced function equals the
   nn_integral of f on a subset, hence is finite since f is integrable. *)
show "integrable M (induced_function A f)"
proof (rule integrableI_nonneg)
show "AE x in M. induced_function A f x ≥ 0" unfolding induced_function_def by (simp add: f_pos sum_nonneg)
have "(∫⇧+x. ennreal (induced_function A f x) ∂M) = (∫⇧+ x. induced_function A (λx. ennreal(f x)) x ∂M)"
unfolding induced_function_def by (auto simp: f_pos)
also have "... = (∫⇧+ x ∈ (⋃n. (T^^n)--`A). f x ∂M)"
by (rule induced_function_nn_integral, auto simp add: assms)
also have "... ≤ (∫⇧+ x. f x ∂M)"
using nn_set_integral_set_mono[where ?A = "(⋃n. (T^^n)--`A)" and ?B = UNIV and ?f = "λx. ennreal(f x)"]
by auto
also have "... < ∞" using assms by (auto simp: less_top)
finally show "(∫⇧+ x. induced_function A f x ∂M) < ∞" by simp
qed (simp)
(* Equality of the Bochner integrals: both sides equal the enn2real of the
   common nn_integral established in *. *)
have "(∫⇧+ x. (f x * indicator (⋃n. (T^^n)--`A) x) ∂M) = (∫⇧+ x ∈ (⋃n. (T^^n)--`A). f x ∂M)"
by (auto split: split_indicator intro!: nn_integral_cong)
also have "... = (∫⇧+ x. induced_function A (λx. ennreal(f x)) x ∂M)"
by (rule induced_function_nn_integral[symmetric], auto simp add: assms)
also have "... = (∫⇧+x. ennreal (induced_function A f x) ∂M)" unfolding induced_function_def by (auto simp: f_pos)
finally have *: "(∫⇧+ x. (f x * indicator (⋃n. (T^^n)--`A) x) ∂M) = (∫⇧+x. ennreal (induced_function A f x) ∂M)"
by simp
have "(∫ x ∈ (⋃n. (T^^n)--`A). f x ∂M) = (∫ x. f x * indicator (⋃n. (T^^n)--`A) x ∂M)"
by (simp add: mult.commute set_lebesgue_integral_def)
also have "... = enn2real (∫⇧+ x. (f x * indicator (⋃n. (T^^n)--`A) x) ∂M)"
by (rule integral_eq_nn_integral, auto simp add: f_pos)
also have "... = enn2real (∫⇧+x. ennreal (induced_function A f x) ∂M)" using * by simp
also have "... = (∫ x. induced_function A f x ∂M)"
apply (rule integral_eq_nn_integral[symmetric])
unfolding induced_function_def by (auto simp add: f_pos sum_nonneg)
finally show "(∫ x. induced_function A f x ∂M) = (∫ x ∈ (⋃n. (T^^n)--`A). f x ∂M)"
by simp
qed
text ‹Here is the general version of Kac's Formula (for a general induced function, starting
from a real-valued integrable function).›
(* General Kac formula for real-valued integrable functions, obtained from the
   nonnegative case by decomposing f = g - h into positive and negative parts
   and using linearity of the integral. *)
theorem induced_function_integral_nonergodic:
fixes f::"'a ⇒ real"
assumes [measurable]: "A ∈ sets M" "integrable M f"
shows "integrable M (induced_function A f)"
"(∫y. induced_function A f y ∂M) = (∫x ∈ (⋃n. (T^^n)--`A). f x ∂M)"
proof -
have U_meas [measurable]: "(⋃n. (T^^n)--`A) ∈ sets M" by measurable
(* g = positive part of f, h = negative part; both are nonnegative and
   integrable, so induced_function_integral_aux applies to each. *)
define g where "g = (λx. max (f x) 0)"
have g_int [measurable]: "integrable M g" unfolding g_def using assms by auto
then have g_int2: "integrable M (induced_function A g)"
using induced_function_integral_aux(1) g_def by auto
define h where "h = (λx. max (-f x) 0)"
have h_int [measurable]: "integrable M h" unfolding h_def using assms by auto
then have h_int2: "integrable M (induced_function A h)"
using induced_function_integral_aux(1) h_def by auto
(* The induced function is linear in f, so it decomposes accordingly. *)
have D1: "f = (λx. g x - h x)" unfolding g_def h_def by auto
have D2: "induced_function A f = (λx. induced_function A g x - induced_function A h x)"
unfolding induced_function_def using D1 by (simp add: sum_subtractf)
then show "integrable M (induced_function A f)" using g_int2 h_int2 by auto
(* Apply the nonnegative result to g and h and recombine by linearity. *)
have "(∫x. induced_function A f x ∂M) = (∫x. induced_function A g x - induced_function A h x ∂M)"
using D2 by simp
also have "... = (∫x. induced_function A g x ∂M) - (∫x. induced_function A h x ∂M)"
using g_int2 h_int2 by auto
also have "... = (∫x ∈ (⋃n. (T^^n)--`A). g x ∂M) - (∫x ∈ (⋃n. (T^^n)--`A). h x ∂M)"
using induced_function_integral_aux(2) g_def h_def g_int h_int by auto
also have "... = (∫x ∈ (⋃n. (T^^n)--`A). (g x - h x) ∂M)"
apply (rule set_integral_diff(2)[symmetric])
unfolding set_integrable_def
using g_int h_int integrable_mult_indicator[OF U_meas] by blast+
also have "... = (∫x ∈ (⋃n. (T^^n)--`A). f x ∂M)"
using D1 by simp
finally show "(∫x. induced_function A f x ∂M) = (∫x ∈ (⋃n. (T^^n)--`A). f x ∂M)" by simp
qed
text ‹We can reformulate the previous statement in terms of induced measure.›
(* Kac's formula stated over the induced (restricted) measure on A: since the
   induced function vanishes outside A (return time is 0 there), integrating it
   over the restricted space agrees with integrating it over M. *)
lemma induced_function_integral_restr_nonergodic:
fixes f::"'a ⇒ real"
assumes [measurable]: "A ∈ sets M" "integrable M f"
shows "integrable (restrict_space M A) (induced_function A f)"
"(∫y. induced_function A f y ∂(restrict_space M A)) = (∫ x ∈ (⋃n. (T^^n)--`A). f x ∂M)"
proof -
have [measurable]: "integrable M (induced_function A f)" by (rule induced_function_integral_nonergodic(1)[OF assms])
then show "integrable (restrict_space M A) (induced_function A f)"
by (metis assms(1) integrable_mult_indicator integrable_restrict_space sets.Int_space_eq2)
have "(∫y. induced_function A f y ∂(restrict_space M A)) = (∫y ∈ A. induced_function A f y ∂M)"
by (simp add: integral_restrict_space set_lebesgue_integral_def)
also have "... = (∫y. induced_function A f y ∂M)"
unfolding real_scaleR_def set_lebesgue_integral_def
proof (rule Bochner_Integration.integral_cong [OF refl])
(* Outside A the return time is 0, so the induced function is the empty sum:
   multiplying by the indicator of A changes nothing. *)
have "induced_function A f y = 0" if "y ∉ A" for y unfolding induced_function_def
using that return_time0[of A] recurrent_subset_incl(1)[of A] return_time_function_def by auto
then show "⋀x. indicator A x * induced_function A f x = induced_function A f x"
unfolding indicator_def by auto
qed
also have "... = (∫ x ∈ (⋃n. (T^^n)--`A). f x ∂M)"
by (rule induced_function_integral_nonergodic(2)[OF assms])
finally show "(∫y. induced_function A f y ∂(restrict_space M A)) = (∫ x ∈ (⋃n. (T^^n)--`A). f x ∂M)"
by simp
qed
end
end
Theory Invariants
section ‹The invariant sigma-algebra, Birkhoff theorem›
theory Invariants
imports Recurrence "HOL-Probability.Conditional_Expectation"
begin
subsection ‹The sigma-algebra of invariant subsets›
text ‹The invariant sigma-algebra of a qmpt is made of those sets that are invariant under the
dynamics. When the transformation is ergodic, it is made of sets of zero or full measure. In
general, the Birkhoff theorem is expressed in terms of the conditional expectation of an integrable
function with respect to the invariant sigma-algebra.›
context qmpt begin
text ‹We define the invariant sigma-algebra, as the sigma algebra of sets which are invariant
under the dynamics, i.e., they coincide with their preimage under $T$.›
definition Invariants where "Invariants = sigma (space M) {A ∈ sets M. T-`A ∩ space M = A}"
text ‹For this definition to make sense, we need to check that it really defines a sigma-algebra:
otherwise, the \verb+sigma+ operation would make garbage out of it. This is the content of the next
lemma.›
(* The sets of Invariants are exactly the invariant measurable sets: the
   generator is already a sigma-algebra, so the sigma operation adds nothing. *)
lemma Invariants_sets: "sets Invariants = {A ∈ sets M. T-`A ∩ space M = A}"
proof -
have "sigma_algebra (space M) {A ∈ sets M. T-`A ∩ space M = A}"
proof -
(* I = all invariant subsets (not necessarily measurable); shown to be a
   sigma-algebra, then intersected with sets M. *)
define I where "I = {A. T-`A ∩ space M = A}"
have i: "⋀A. A ∈ I ⟹ A ⊆ space M" unfolding I_def by auto
have "algebra (space M) I"
proof (subst algebra_iff_Un)
have a: "I ⊆ Pow (space M)" using i by auto
have b: "{} ∈ I" unfolding I_def by auto
(* Closure under complement: preimage commutes with set difference. *)
{
fix A assume *: "A ∈ I"
then have "T-`(space M - A) = T-`(space M) - T-`A" by auto
then have "T-`(space M - A) ∩ space M = T-`(space M) ∩ (space M) - T-`A ∩ (space M)" by auto
also have "... = space M - A" using * I_def by (simp add: inf_absorb2 subsetI)
finally have "space M - A ∈ I" unfolding I_def by simp
}
then have c: "(∀a∈I. space M - a ∈ I)" by simp
have d: "(∀a∈I. ∀b∈I. a ∪ b ∈ I)" unfolding I_def by auto
show "I ⊆ Pow (space M) ∧ {} ∈ I ∧ (∀a∈I. space M - a ∈ I) ∧ (∀a∈I. ∀b∈I. a ∪ b ∈ I)"
using a b c d by blast
qed
(* Closure under countable unions is immediate; intersect with sets M to get a
   sigma-algebra of measurable invariant sets. *)
moreover have "(∀F. range F ⊆ I ⟶ (⋃i::nat. F i) ∈ I)" unfolding I_def by auto
ultimately have "sigma_algebra (space M) I" using sigma_algebra_iff by auto
moreover have "sigma_algebra (space M) (sets M)" using measure_space measure_space_def by auto
ultimately have "sigma_algebra (space M) (I ∩ (sets M))" using sigma_algebra_intersection by auto
moreover have "I ∩ sets M = {A ∈ sets M. T-`A ∩ space M = A}" unfolding I_def by auto
ultimately show ?thesis by simp
qed
then show ?thesis unfolding Invariants_def using sigma_algebra.sets_measure_of_eq by blast
qed
text ‹By definition, the invariant subalgebra is a subalgebra of the original algebra. This is
expressed in the following lemmas.›
(* Invariants is a subalgebra of M: same space, smaller set system. *)
lemma Invariants_is_subalg: "subalgebra M Invariants"
unfolding subalgebra_def
using Invariants_sets Invariants_def by (simp add: space_measure_of_conv)
(* Every invariant set is in particular measurable in M. *)
lemma Invariants_in_sets:
assumes "A ∈ sets Invariants"
shows "A ∈ sets M"
using Invariants_sets assms by blast
(* A function measurable w.r.t. the invariant subalgebra is measurable w.r.t. M. *)
lemma Invariants_measurable_func:
assumes "f ∈ measurable Invariants N"
shows "f ∈ measurable M N"
using Invariants_is_subalg measurable_from_subalg assms by auto
text ‹We give several trivial characterizations of invariant sets or functions.›
(* An invariant set equals its restricted preimage T--`A (preimage intersected
   with space M). *)
lemma Invariants_vrestr:
assumes "A ∈ sets Invariants"
shows "T--`A = A"
using assms Invariants_sets Invariants_in_sets[OF assms] by auto
(* Invariant sets are stable under the dynamics: T maps A into A. *)
lemma Invariants_points:
assumes "A ∈ sets Invariants" "x ∈ A"
shows "T x ∈ A"
using assms Invariants_sets by auto
(* An Invariants-measurable function is constant along orbits: f (T x) = f x.
   The t2_space assumption makes singletons closed, hence Borel. *)
lemma Invariants_func_is_invariant:
fixes f::"_ ⇒ 'b::t2_space"
assumes "f ∈ borel_measurable Invariants" "x ∈ space M"
shows "f (T x) = f x"
proof -
(* The level set {y. f y = f x} is an invariant set containing x, so it
   also contains T x. *)
have "{f x} ∈ sets borel" by simp
then have "f-`({f x}) ∩ space M ∈ Invariants" using assms(1)
by (metis (no_types, lifting) Invariants_def measurable_sets space_measure_of_conv)
moreover have "x ∈ f-`({f x}) ∩ space M" using assms(2) by blast
ultimately have "T x ∈ f-`({f x}) ∩ space M" by (rule Invariants_points)
then show ?thesis by simp
qed
(* Iterated version of the previous lemma: f is constant along the whole orbit. *)
lemma Invariants_func_is_invariant_n:
fixes f::"_ ⇒ 'b::t2_space"
assumes "f ∈ borel_measurable Invariants" "x ∈ space M"
shows "f ((T^^n) x) = f x"
by (induction n, auto simp add: assms Invariants_func_is_invariant)
(* Converse characterization: an M-measurable function that is invariant under T
   (pointwise on space M) is measurable w.r.t. the invariant sigma-algebra. *)
lemma Invariants_func_charac:
assumes [measurable]: "f ∈ measurable M N"
and "⋀x. x ∈ space M ⟹ f(T x) = f x"
shows "f ∈ measurable Invariants N"
proof (rule measurableI)
(* Preimages of measurable sets are invariant, hence in sets Invariants. *)
fix A assume "A ∈ sets N"
have "space Invariants = space M" using Invariants_is_subalg subalgebra_def by force
show "f -` A ∩ space Invariants ∈ sets Invariants"
apply (subst Invariants_sets)
apply (auto simp add: assms ‹A ∈ sets N› ‹space Invariants = space M›)
using ‹A ∈ sets N› assms(1) measurable_sets by blast
next
fix x assume "x ∈ space Invariants"
have "space Invariants = space M" using Invariants_is_subalg subalgebra_def by force
then show "f x ∈ space N" using assms(1) ‹x ∈ space Invariants› by (metis measurable_space)
qed
(* For an invariant function, each term of the Birkhoff sum equals f x,
   so the sum collapses to n * f x. *)
lemma birkhoff_sum_of_invariants:
fixes f::" _ ⇒ real"
assumes "f ∈ borel_measurable Invariants" "x ∈ space M"
shows "birkhoff_sum f n x = n * f x"
unfolding birkhoff_sum_def using Invariants_func_is_invariant_n[OF assms] by auto
text ‹There are two possible definitions of the invariant sigma-algebra, competing in the
literature: one could also use the sets such that $T^{-1}A$ coincides with $A$ up to
a measure $0$ set. It turns out that this is equivalent to being invariant (in our sense) up
to a measure $0$ set. Therefore, for all interesting purposes, the two definitions would
give the same results.
For the proof, we start from an almost invariant set, and build a genuinely invariant set that
coincides with it by adding or throwing away null parts.
›
(* A measurable set coincides with a genuinely invariant set up to a null set
   iff it is almost invariant (T--`A Δ A is null). This shows the two competing
   definitions of the invariant sigma-algebra agree modulo null sets. *)
proposition Invariants_quasi_Invariants_sets:
assumes [measurable]: "A ∈ sets M"
shows "(∃B ∈ sets Invariants. A Δ B ∈ null_sets M) ⟷ (T--`A Δ A ∈ null_sets M)"
proof
(* Easy direction: if A is a null-modification of an invariant B, then the
   preimage of A differs from A by a null set. *)
assume "∃B ∈ sets Invariants. A Δ B ∈ null_sets M"
then obtain B where "B ∈ sets Invariants" "A Δ B ∈ null_sets M" by auto
then have [measurable]: "B ∈ sets M" using Invariants_in_sets by simp
have "B = T--` B" using Invariants_vrestr ‹B ∈ sets Invariants› by simp
then have "T--`A Δ B = T--`(A Δ B)" by simp
moreover have "T--`(A Δ B) ∈ null_sets M"
by (rule T_quasi_preserves_null2(1)[OF ‹A Δ B ∈ null_sets M›])
ultimately have "T--`A Δ B ∈ null_sets M" by simp
then show "T--`A Δ A ∈ null_sets M"
by (rule null_sym_diff_transitive) (auto simp add: ‹A Δ B ∈ null_sets M› Un_commute)
next
(* Hard direction: build a genuinely invariant B from the almost invariant A. *)
assume H: "T --` A Δ A ∈ null_sets M"
have [measurable]: "⋀n. (T^^n)--`A ∈ sets M" by simp
{
(* Auxiliary fact: almost invariance propagates to all iterates, i.e.,
   (T^^n)--`K differs from K by a null set, by induction on n. *)
fix K assume [measurable]: "K ∈ sets M" and "T--`K Δ K ∈ null_sets M"
fix n::nat
have "(T^^n)--`K Δ K ∈ null_sets M"
proof (induction n)
case 0
have "(T^^0)--` K = K" using T_vrestr_0 by simp
then show ?case using Diff_cancel sup.idem by (metis null_sets.empty_sets)
next
case (Suc n)
have "T--`((T^^n)--`K Δ K) ∈ null_sets M"
using Suc.IH T_quasi_preserves_null(1)[of "((T^^n)--`K Δ K)"] by simp
then have *: "(T^^(Suc n))--`K Δ T--`K ∈ null_sets M" using T_vrestr_composed(2)[OF ‹K ∈ sets M›] by simp
then show ?case
by (rule null_sym_diff_transitive, simp add: ‹T--`K Δ K ∈ null_sets M› ‹K ∈ sets M›, measurable)
qed
} note * = this
(* Step 1: C, the intersection of all preimages of A, satisfies C ⊆ T--`C
   and still coincides with A up to a null set. *)
define C where "C = (⋂n. (T^^n)--`A)"
have [measurable]: "C ∈ sets M" unfolding C_def by simp
have "C Δ A ⊆ (⋃n. (T^^n)--`A Δ A)" unfolding C_def by auto
moreover have "(⋃n. (T^^n)--`A Δ A) ∈ null_sets M"
using * null_sets_UN assms ‹T --` A Δ A ∈ null_sets M› by auto
ultimately have CA: "C Δ A ∈ null_sets M" by (meson ‹C ∈ sets M› assms sets.Diff sets.Un null_sets_subset)
then have "T--`(C Δ A) ∈ null_sets M" by (rule T_quasi_preserves_null2(1))
then have "T--`C Δ T--`A ∈ null_sets M" by simp
then have "T--`C Δ A ∈ null_sets M"
by (rule null_sym_diff_transitive, auto simp add: H)
then have TCC: "T--`C Δ C ∈ null_sets M"
apply (rule null_sym_diff_transitive) using CA by (auto simp add: Un_commute)
have "C ⊆ (⋂n∈{1..}. (T^^n)--`A)" unfolding C_def by auto
moreover have "T--`C = (⋂n∈{1..}. (T^^n)--`A)"
using T_vrestr_composed(2)[OF assms] by (simp add: C_def atLeast_Suc_greaterThan greaterThan_0)
ultimately have "C ⊆ T--`C" by blast
then have "(T^^0)--`C ⊆ (T^^1)--`C" using T_vrestr_0 by auto
moreover have "(T^^1)--`C ⊆ (⋃n∈{1..}. (T^^n)--`C)" by auto
ultimately have "(T^^0)--`C ⊆ (⋃n∈{1..}. (T^^n)--`C)" by auto
then have "(T^^0)--`C ∪ (⋃n∈{1..}. (T^^n)--`C) = (⋃n∈{1..}. (T^^n)--`C)" by auto
moreover have "(⋃n. (T^^n)--`C) = (T^^0)--`C ∪ (⋃n∈{1..}. (T^^n)--`C)" by (rule union_insert_0)
ultimately have C2: "(⋃n. (T^^n)--`C) = (⋃n∈{1..}. (T^^n)--`C)" by simp
(* Step 2: B, the union of all preimages of C, is exactly invariant and still
   coincides with A up to a null set. *)
define B where "B = (⋃n. (T^^n)--`C)"
have [measurable]: "B ∈ sets M" unfolding B_def by simp
have "B Δ C ⊆ (⋃n. (T^^n)--`C Δ C)" unfolding B_def by auto
moreover have "(⋃n. (T^^n)--`C Δ C) ∈ null_sets M"
using * null_sets_UN assms TCC by auto
ultimately have "B Δ C ∈ null_sets M" by (meson ‹B ∈ sets M› ‹C ∈ sets M› assms sets.Diff sets.Un null_sets_subset)
then have "B Δ A ∈ null_sets M"
by (rule null_sym_diff_transitive, auto simp add: CA)
then have a: "A Δ B ∈ null_sets M" by (simp add: Un_commute)
have "T--`B = (⋃n∈{1..}. (T^^n)--`C)"
using T_vrestr_composed(2)[OF ‹C ∈ sets M›] by (simp add: B_def atLeast_Suc_greaterThan greaterThan_0)
then have "T--`B = B" unfolding B_def using C2 by simp
then have "B ∈ sets Invariants" using Invariants_sets vimage_restr_def by auto
then show "∃B ∈ sets Invariants. A Δ B ∈ null_sets M" using a by blast
qed
text ‹In a conservative setting, it is enough for a set to be included in its image or its preimage
to be almost invariant: indeed, the difference set has disjoint preimages, and is therefore
null by conservativity.›
(* In a conservative system, T--`A ⊆ A already forces A to be almost invariant:
   the difference B = A - T--`A has pairwise disjoint preimages, hence is null
   by conservativity. *)
lemma (in conservative) preimage_included_then_almost_invariant:
assumes [measurable]: "A ∈ sets M" and "T--`A ⊆ A"
shows "A Δ (T--`A) ∈ null_sets M"
proof -
define B where "B = A - T--`A"
then have [measurable]: "B ∈ sets M" by simp
(* The preimages (T^^n)--`A decrease, so their successive differences,
   which are the preimages of B, are pairwise disjoint. *)
have "(T^^(Suc n))--`A ⊆ (T^^n)--`A" for n using T_vrestr_composed(3)[OF assms(1)] vrestr_inclusion[OF assms(2)] by auto
then have "disjoint_family (λn. (T^^n)--`A - (T^^(Suc n))--`A)" by (rule disjoint_family_Suc2[where ?A = "λn. (T^^n)--`A"])
moreover have "(T^^n)--`A - (T^^(Suc n))--`A = (T^^n)--`B" for n unfolding B_def Suc_eq_plus1 using T_vrestr_composed(3)[OF assms(1)] by auto
ultimately have "disjoint_family (λn. (T^^n)--` B)" by simp
then have "⋀n. n ≠ 0 ⟹ ((T^^n)--`B) ∩ B = {}" unfolding disjoint_family_on_def by (metis UNIV_I T_vrestr_0(1)[OF ‹B ∈ sets M›])
then have "⋀n. n > 0 ⟹ (T^^n)-`B ∩ B = {}" unfolding vimage_restr_def by (simp add: Int_assoc)
(* Conservativity: a set whose iterated preimages never return to it is null. *)
then have "B ∈ null_sets M" using disjoint_then_null[OF ‹B ∈ sets M›] Int_commute by auto
then show ?thesis unfolding B_def using assms(2) by (simp add: Diff_mono Un_absorb2)
qed
(* Symmetric statement: A ⊆ T--`A also forces almost invariance, with the
   difference B = T--`A - A this time, via increasing preimages. *)
lemma (in conservative) preimage_includes_then_almost_invariant:
assumes [measurable]: "A ∈ sets M" and "A ⊆ T--`A"
shows "A Δ (T--`A) ∈ null_sets M"
proof -
define B where "B = T--`A - A"
then have [measurable]: "B ∈ sets M" by simp
(* Now the preimages (T^^n)--`A increase; the successive differences are
   again the preimages of B and are pairwise disjoint. *)
have "⋀n. (T^^(Suc n))--`A ⊇ (T^^n)--`A" using T_vrestr_composed(3)[OF assms(1)] vrestr_inclusion[OF assms(2)] by auto
then have "disjoint_family (λn. (T^^(Suc n))--`A - (T^^n)--`A)" by (rule disjoint_family_Suc[where ?A = "λn. (T^^n)--`A"])
moreover have "⋀n. (T^^(Suc n))--`A - (T^^n)--`A = (T^^n)--`B" unfolding B_def Suc_eq_plus1 using T_vrestr_composed(3)[OF assms(1)] by auto
ultimately have "disjoint_family (λn. (T^^n)--` B)" by simp
then have "⋀n. n ≠ 0 ⟹ ((T^^n)--`B) ∩ B = {}" unfolding disjoint_family_on_def by (metis UNIV_I T_vrestr_0(1)[OF ‹B ∈ sets M›])
then have "⋀n. n > 0 ⟹ (T^^n)-`B ∩ B = {}" unfolding vimage_restr_def by (simp add: Int_assoc)
then have "B ∈ null_sets M" using disjoint_then_null[OF ‹B ∈ sets M›] Int_commute by auto
then show ?thesis unfolding B_def using assms(2) by (simp add: Diff_mono Un_absorb1)
qed
text ‹The above properties for sets are also true for functions: if $f$ and $f \circ T$
coincide almost everywhere, i.e., $f$ is almost invariant, then $f$ coincides almost everywhere
with a true invariant function.
The idea of the proof is straightforward: throw away the orbits on
which $f$ is not really invariant (say this is the complement of the good set),
and replace it by $0$ there. However, this does not work
directly: the good set is not invariant, some points may have a non-constant value of $f$ on their
orbit but reach the good set eventually. One can however define $g$ to be equal to the
eventual value of $f$ along the orbit, if the orbit reaches the good set, and $0$ elsewhere.›
(* Functional analogue of Invariants_quasi_Invariants_sets: f coincides a.e.
   with an Invariants-measurable function iff f is almost invariant
   (f o T = f almost everywhere). *)
proposition Invariants_quasi_Invariants_functions:
fixes f::"_ ⇒ 'b::{second_countable_topology, t2_space}"
assumes f_meas [measurable]: "f ∈ borel_measurable M"
shows "(∃g ∈ borel_measurable Invariants. AE x in M. f x = g x) ⟷ (AE x in M. f(T x) = f x)"
proof
(* Easy direction: if f = g a.e. with g invariant, then f(T x) = f x off the
   null set B ∪ T--`B, where B is the bad set {f ≠ g}. *)
assume "∃g∈borel_measurable Invariants. AE x in M. f x = g x"
then obtain g where g:"g∈borel_measurable Invariants" "AE x in M. f x = g x" by blast
then have [measurable]: "g ∈ borel_measurable M" using Invariants_measurable_func by auto
define A where "A = {x ∈ space M. f x = g x}"
have [measurable]: "A ∈ sets M" unfolding A_def by simp
define B where "B = space M - A"
have [measurable]: "B ∈ sets M" unfolding B_def by simp
moreover have "AE x in M. x ∉ B" unfolding B_def A_def using g(2) by auto
ultimately have "B ∈ null_sets M" using AE_iff_null_sets by blast
then have "T--`B ∈ null_sets M" by (rule T_quasi_preserves_null2(1))
then have "B ∪ T--`B ∈ null_sets M" using ‹B ∈ null_sets M› by auto
then have "AE x in M. x ∉ (B ∪ T--`B)" using AE_iff_null_sets null_setsD2 by blast
then have i: "AE x in M. x ∈ space M - (B ∪ T--`B)" by auto
{
fix x assume *: "x ∈ space M - (B ∪ T--`B)"
then have "x ∈ A" unfolding B_def by blast
then have "f x = g x" unfolding A_def by blast
have "T x ∈ A" using * B_def by auto
then have "f(T x) = g(T x)" unfolding A_def by blast
moreover have "g(T x) = g x"
apply (rule Invariants_func_is_invariant) using * by (auto simp add: assms ‹g∈borel_measurable Invariants›)
ultimately have "f(T x) = f x" using ‹f x = g x› by simp
}
then show "AE x in M. f(T x) = f x" using i by auto
next
assume *: "AE x in M. f (T x) = f x"
text ‹\verb+good_set+ is the set of points for which $f$ is constant on their orbit. Here, we define
$g = f$. If a point ever enters \verb+good_set+, then we take $g$ to be the value of $f$ there. Otherwise,
$g$ takes an arbitrary value, say $y_0$.›
define good_set where "good_set = {x ∈ space M. ∀n. f((T^^(Suc n)) x) = f((T^^n) x)}"
(* good_time x = first time the orbit of x enters good_set (Inf of the hitting
   times; only meaningful when the hitting set is nonempty). *)
define good_time where "good_time = (λx. Inf {n. (T^^n) x ∈ good_set})"
have "AE x in M. x ∈ good_set" using T_AE_iterates[OF *] by (simp add: good_set_def)
have [measurable]: "good_set ∈ sets M" unfolding good_set_def by auto
obtain y0::'b where True by auto
define g where "g = (λx. if (∃n. (T^^n) x ∈ good_set) then f((T^^(good_time x)) x) else y0)"
have [measurable]: "good_time ∈ measurable M (count_space UNIV)" unfolding good_time_def by measurable
have [measurable]: "g ∈ borel_measurable M" unfolding g_def by measurable
(* On good_set itself, the hitting time is 0, so g = f there; since good_set
   has full measure, f = g almost everywhere. *)
have "f x = g x" if "x ∈ good_set" for x
proof -
have a: "0 ∈ {n. (T^^n) x ∈ good_set}" using that by simp
have "good_time x = 0"
unfolding good_time_def apply (intro cInf_eq_non_empty) using a by blast+
moreover have "{n. (T^^n) x ∈ good_set} ≠ {}" using a by blast
ultimately show "f x = g x" unfolding g_def by auto
qed
then have "AE x in M. f x = g x" using ‹AE x in M. x ∈ good_set› by auto
have *: "f((T^^(Suc 0)) x) = f((T^^0) x)" if "x ∈ good_set" for x
using that unfolding good_set_def by blast
(* good_set is forward invariant, and f is constant along orbits inside it. *)
have good_1: "T x ∈ good_set ∧ f(T x) = f x" if "x ∈ good_set" for x
using *[OF that] that unfolding good_set_def apply (auto)
unfolding T_Tn_T_compose by blast
then have good_k: "⋀x. x ∈ good_set ⟹ (T^^k) x ∈ good_set ∧ f((T^^k) x) = f x" for k
by (induction k, auto)
(* Key point: g is genuinely invariant everywhere on space M (not just a.e.),
   so it is Invariants-measurable by Invariants_func_charac. *)
have "g(T x) = g x" if "x ∈ space M" for x
proof (cases)
assume *: "∃n. (T^^n) (T x) ∈ good_set"
define n where "n = Inf {n. (T^^n) (T x) ∈ good_set}"
have "(T^^n)(T x) ∈ good_set" using * Inf_nat_def1 by (metis empty_iff mem_Collect_eq n_def)
then have a: "(T^^(n+1)) x ∈ good_set" by (metis Suc_eq_plus1 comp_eq_dest_lhs funpow.simps(2) funpow_swap1)
then have **: "∃m. (T^^m) x ∈ good_set" by blast
define m where "m = Inf {m. (T^^m) x ∈ good_set}"
then have "(T^^m) x ∈ good_set" using ** Inf_nat_def1 by (metis empty_iff mem_Collect_eq)
have "n+1 ∈ {m. (T^^m) x ∈ good_set}" using a by simp
then have "m ≤ n+1" using m_def by (simp add: Inf_nat_def Least_le)
then obtain k where "n+1 = m + k" using le_iff_add by blast
(* Compare the eventual values of f seen from x and from T x: they agree,
   since both orbits reach good_set and f is constant there onwards. *)
have "g x = f((T^^m) x)" unfolding g_def good_time_def using ** m_def by simp
also have "... = f((T^^k) ((T^^m) x))" using ‹(T^^m) x ∈ good_set› good_k by simp
also have "... = f((T^^(n+1))x)" using ‹n+1 = m + k›[symmetric] funpow_add by (metis add.commute comp_apply)
also have "... = f((T^^n) (T x))" using funpow_Suc_right by (metis Suc_eq_plus1 comp_apply)
also have "... = g(T x)" unfolding g_def good_time_def using * n_def by simp
finally show "g(T x) = g x" by simp
next
(* If the orbit of T x never reaches good_set, neither does that of x,
   so both g values are the default y0. *)
assume *: "¬(∃n. (T^^n) (T x) ∈ good_set)"
then have "g(T x) = y0" unfolding g_def by simp
have **: "¬(∃n. (T^^(Suc n)) x ∈ good_set)" using funpow_Suc_right * by (metis comp_apply)
have "T x ∉ good_set" using good_k * by blast
then have "x ∉ good_set" using good_1 by auto
then have "¬(∃n. (T^^n) x ∈ good_set)" using ** using good_1 by fastforce
then have "g x = y0" unfolding g_def by simp
then show "g(T x) = g x" using ‹g(T x) = y0› by simp
qed
then have "g ∈ borel_measurable Invariants" by (rule Invariants_func_charac[OF ‹g ∈ borel_measurable M›])
then show "∃g∈borel_measurable Invariants. AE x in M. f x = g x" using ‹AE x in M. f x = g x› by blast
qed
text ‹In a conservative setting, it suffices to have an almost everywhere inequality to get
an almost everywhere equality, as the set where there is strict inequality has $0$ measure
as its iterates are disjoint, by conservativity.›
(* In a conservative system, if f decreases a.e. along orbits then it is
   a.e. invariant: the set where f strictly decreases at some threshold has
   disjoint iterates, hence is null. D is a countable separating family used
   to reduce the uncountably many thresholds to countably many. *)
proposition (in conservative) AE_decreasing_then_invariant:
fixes f::"_ ⇒ 'b::{linorder_topology, second_countable_topology}"
assumes "AE x in M. f(T x) ≤ f x"
and [measurable]: "f ∈ borel_measurable M"
shows "AE x in M. f(T x) = f x"
proof -
obtain D::"'b set" where D: "countable D" "(∀x y. x < y ⟶ (∃d ∈ D. x ≤ d ∧ d < y))"
using countable_separating_set_linorder2 by blast
(* A: points where f decreases in one step; B: points where it decreases
   along the whole orbit. B has full measure. *)
define A where "A = {x ∈ space M. f(T x) ≤ f x}"
then have [measurable]: "A ∈ sets M" by simp
define B where "B = {x ∈ space M. ∀n. f((T^^(n+1)) x) ≤ f((T^^n)x)}"
then have [measurable]: "B ∈ sets M" by simp
have "space M - A ∈ null_sets M" unfolding A_def using assms by (simp add: assms(1) AE_iff_null_sets)
then have "(⋃n. (T^^n)--`(space M - A)) ∈ null_sets M" by (metis null_sets_UN T_quasi_preserves_null2(2))
moreover have "space M - B = (⋃n. (T^^n)--`(space M - A))"
unfolding B_def A_def by auto
ultimately have "space M - B ∈ null_sets M" by simp
have *: "B = (⋂n. (T^^n)--`A)"
unfolding B_def A_def by auto
then have "T--`B = (⋂n. T--` (T^^n)--`A)" by auto
also have "... = (⋂n. (T^^(n+1))--`A)" using T_vrestr_composed(2)[OF ‹A ∈ sets M›] by simp
also have "... ⊇ (⋂n. (T^^n)--`A)" by blast
finally have B1: "B ⊆ T--`B" using * by simp
have "B ⊆ A" using * T_vrestr_0[OF ‹A ∈ sets M›] by blast
then have B2: "⋀x. x ∈ B ⟹ f(T x) ≤ f x" unfolding A_def by auto
(* C t: sublevel set of f inside B; it satisfies C t ⊆ T--`(C t), so by
   conservativity the preimage adds only a null set. *)
define C where "C = (λt. {x ∈ B. f x ≤ t})"
{
fix t
have "C t = B ∩ f-`{..t} ∩ space M" unfolding C_def using sets.sets_into_space[OF ‹B ∈ sets M›] by auto
then have [measurable]: "C t ∈ sets M" using assms(2) by simp
have "C t ⊆ T--`(C t)" using B1 unfolding C_def vimage_restr_def apply auto using B2 order_trans by blast
then have "T--`(C t) - C t ∈ null_sets M" by (metis Diff_mono Un_absorb1 preimage_includes_then_almost_invariant[OF ‹C t ∈ sets M›])
}
then have "(⋃d∈D. T--`(C d) - C d) ∈ null_sets M" using ‹countable D› by (simp add: null_sets_UN')
then have "(space M - B) ∪ (⋃d∈D. T--`(C d) - C d) ∈ null_sets M" using ‹space M - B ∈ null_sets M› by auto
then have "AE x in M. x ∉ (space M - B) ∪ (⋃d∈D. T--`(C d) - C d)" using AE_not_in by blast
moreover
{
(* Outside the exceptional null set, a strict decrease at x would put x in
   T--`(C d) - C d for a separating d, contradiction. *)
fix x assume x: "x ∈ space M" "x ∉ (space M - B) ∪ (⋃d∈D. T--`(C d) - C d)"
then have "x ∈ B" by simp
then have "T x ∈ B" using B1 by auto
have "f(T x) = f x"
proof (rule ccontr)
assume "f(T x) ≠ f x"
then have "f(T x) < f x" using B2[OF ‹x ∈ B›] by simp
then obtain d where d: "d ∈ D" "f(T x) ≤ d ∧ d < f x" using D by auto
then have "T x ∈ C d" using ‹T x ∈ B› unfolding C_def by simp
then have "x ∈ T--`(C d)" using ‹x ∈ space M› by simp
then have "x ∈ C d" using x ‹d ∈ D› by simp
then have "f x ≤ d" unfolding C_def by simp
then show False using d by auto
qed
}
ultimately show ?thesis by auto
qed
(* Mirror image of AE_decreasing_then_invariant: an a.e. increasing function
   along orbits is a.e. invariant. Same proof with superlevel sets and the
   other countable separating family. *)
proposition (in conservative) AE_increasing_then_invariant:
fixes f::"_ ⇒ 'b::{linorder_topology, second_countable_topology}"
assumes "AE x in M. f(T x) ≥ f x"
and [measurable]: "f ∈ borel_measurable M"
shows "AE x in M. f(T x) = f x"
proof -
obtain D::"'b set" where D: "countable D" "(∀x y. x < y ⟶ (∃d ∈ D. x < d ∧ d ≤ y))"
using countable_separating_set_linorder1 by blast
define A where "A = {x ∈ space M. f(T x) ≥ f x}"
then have [measurable]: "A ∈ sets M" by simp
define B where "B = {x ∈ space M. ∀n. f((T^^(n+1)) x) ≥ f((T^^n)x)}"
then have [measurable]: "B ∈ sets M" by simp
have "space M - A ∈ null_sets M" unfolding A_def using assms by (simp add: assms(1) AE_iff_null_sets)
then have "(⋃n. (T^^n)--`(space M - A)) ∈ null_sets M" by (metis null_sets_UN T_quasi_preserves_null2(2))
moreover have "space M - B = (⋃n. (T^^n)--`(space M - A))"
unfolding B_def A_def by auto
ultimately have "space M - B ∈ null_sets M" by simp
have *: "B = (⋂n. (T^^n)--`A)"
unfolding B_def A_def by auto
then have "T--`B = (⋂n. T--` (T^^n)--`A)" by auto
also have "... = (⋂n. (T^^(n+1))--`A)" using T_vrestr_composed(2)[OF ‹A ∈ sets M›] by simp
also have "... ⊇ (⋂n. (T^^n)--`A)" by blast
finally have B1: "B ⊆ T--`B" using * by simp
have "B ⊆ A" using * T_vrestr_0[OF ‹A ∈ sets M›] by blast
then have B2: "⋀x. x ∈ B ⟹ f(T x) ≥ f x" unfolding A_def by auto
(* C t: superlevel set of f inside B (note {t..} instead of {..t}). *)
define C where "C = (λt. {x ∈ B. f x ≥ t})"
{
fix t
have "C t = B ∩ f-`{t..} ∩ space M" unfolding C_def using sets.sets_into_space[OF ‹B ∈ sets M›] by auto
then have [measurable]: "C t ∈ sets M" using assms(2) by simp
have "C t ⊆ T--`(C t)" using B1 unfolding C_def vimage_restr_def apply auto using B2 order_trans by blast
then have "T--`(C t) - C t ∈ null_sets M" by (metis Diff_mono Un_absorb1 preimage_includes_then_almost_invariant[OF ‹C t ∈ sets M›])
}
then have "(⋃d∈D. T--`(C d) - C d) ∈ null_sets M" using ‹countable D› by (simp add: null_sets_UN')
then have "(space M - B) ∪ (⋃d∈D. T--`(C d) - C d) ∈ null_sets M" using ‹space M - B ∈ null_sets M› by auto
then have "AE x in M. x ∉ (space M - B) ∪ (⋃d∈D. T--`(C d) - C d)" using AE_not_in by blast
moreover
{
fix x assume x: "x ∈ space M" "x ∉ (space M - B) ∪ (⋃d∈D. T--`(C d) - C d)"
then have "x ∈ B" by simp
then have "T x ∈ B" using B1 by auto
have "f(T x) = f x"
proof (rule ccontr)
assume "f(T x) ≠ f x"
then have "f(T x) > f x" using B2[OF ‹x ∈ B›] by simp
then obtain d where d: "d ∈ D" "f(T x) ≥ d ∧ d > f x" using D by auto
then have "T x ∈ C d" using ‹T x ∈ B› unfolding C_def by simp
then have "x ∈ T--`(C d)" using ‹x ∈ space M› by simp
then have "x ∈ C d" using x ‹d ∈ D› by simp
then have "f x ≥ d" unfolding C_def by simp
then show False using d by auto
qed
}
ultimately show ?thesis by auto
qed
text ‹For an invertible map, the invariants of $T$ and $T^{-1}$ are the same.›
(* For an invertible quasi-measure-preserving map, the invariant sigma-algebras
   of T and of its inverse Tinv coincide: a set is T-invariant iff it is
   Tinv-invariant. *)
lemma Invariants_Tinv:
assumes "invertible_qmpt"
shows "qmpt.Invariants M Tinv = Invariants"
proof -
interpret I: qmpt M Tinv using Tinv_qmpt[OF assms] by auto
(* Pointwise equivalence of the two invariance conditions for measurable A. *)
have "(T -` A ∩ space M = A) ⟷ (Tinv -` A ∩ space M = A)" if "A ∈ sets M" for A
proof
assume "T -` A ∩ space M = A"
then show "Tinv -` A ∩ space M = A"
using assms that unfolding Tinv_def invertible_qmpt_def
apply auto
apply (metis IntE UNIV_I bij_def imageE inv_f_f vimageE)
apply (metis I.T_spaceM_stable(1) Int_iff Tinv_def bij_inv_eq_iff vimageI)
done
next
assume "Tinv -` A ∩ space M = A"
then show "T -` A ∩ space M = A"
using assms that unfolding Tinv_def invertible_qmpt_def
apply auto
apply (metis IntE bij_def inv_f_f vimageE)
apply (metis T_Tinv_of_set T_meas Tinv_def assms qmpt.vrestr_of_set qmpt_axioms vrestr_image(3))
done
qed
then have "{A ∈ sets M. Tinv -` A ∩ space M = A} = {A ∈ sets M. T -` A ∩ space M = A}"
by blast
then show ?thesis unfolding Invariants_def I.Invariants_def by auto
qed
end
(* In a finite measure preserving system, the invariant sigma-algebra is a
   subalgebra of a finite measure space, giving access to conditional
   expectations with respect to Invariants. *)
sublocale fmpt ⊆ finite_measure_subalgebra M Invariants
unfolding finite_measure_subalgebra_def finite_measure_subalgebra_axioms_def
using Invariants_is_subalg by (simp add: finite_measureI)
context fmpt
begin
text ‹The conditional expectation with respect to the invariant sigma-algebra is the same
for $f$ or $f \circ T$, essentially by definition.›
(* The conditional expectation w.r.t. Invariants is unchanged by composing
   with T^^n: integrals over invariant sets are preserved by the dynamics. *)
lemma Invariants_of_foTn:
fixes f::"'a ⇒ real"
assumes [measurable]: "integrable M f"
shows "AE x in M. real_cond_exp M Invariants (f o (T^^n)) x = real_cond_exp M Invariants f x"
proof (rule real_cond_exp_charact)
(* Check the defining property of conditional expectation: equality of
   integrals over every invariant set A. *)
fix A assume [measurable]: "A ∈ sets Invariants"
then have [measurable]: "A ∈ sets M" using Invariants_in_sets by blast
then have ind_meas [measurable]: "((indicator A)::('a ⇒ real)) ∈ borel_measurable Invariants" by auto
have "set_lebesgue_integral M A (f ∘ (T^^n)) = (∫x. indicator A x * f((T^^n) x) ∂M)"
by (auto simp: comp_def set_lebesgue_integral_def)
(* Invariance of A lets us rewrite indicator A x as indicator A ((T^^n) x),
   then apply measure preservation of T^^n. *)
also have "... = (∫x. indicator A ((T^^n) x) * f ((T^^n) x) ∂M)"
by (rule Bochner_Integration.integral_cong, auto simp add: Invariants_func_is_invariant_n[OF ind_meas])
also have "... = (∫x. indicator A x * f x ∂M)"
apply (rule Tn_integral_preserving(2)) using integrable_mult_indicator[OF ‹A ∈ sets M› assms] by auto
also have "... = (∫x. indicator A x * real_cond_exp M Invariants f x ∂M)"
apply (rule real_cond_exp_intg(2)[symmetric]) using integrable_mult_indicator[OF ‹A ∈ sets M› assms] by auto
also have "... = set_lebesgue_integral M A (real_cond_exp M Invariants f)"
by (auto simp: set_lebesgue_integral_def)
finally show "set_lebesgue_integral M A (f ∘ (T^^n)) = set_lebesgue_integral M A (real_cond_exp M Invariants f)"
by simp
qed (auto simp add: assms real_cond_exp_int Tn_integral_preserving(1)[OF assms] comp_def)
(* Special case n = 1 of the previous lemma. *)
lemma Invariants_of_foT:
fixes f::"'a ⇒ real"
assumes [measurable]: "integrable M f"
shows "AE x in M. real_cond_exp M Invariants f x = real_cond_exp M Invariants (f o T) x"
using Invariants_of_foTn[OF assms, where ?n = 1] by auto
(* The conditional expectation of a Birkhoff sum is n times that of f:
   each summand f o T^^i has the same conditional expectation as f. *)
lemma birkhoff_sum_Invariants:
fixes f::"'a ⇒ real"
assumes [measurable]: "integrable M f"
shows "AE x in M. real_cond_exp M Invariants (birkhoff_sum f n) x = n * real_cond_exp M Invariants f x"
proof -
define F where "F = (λi. f o (T^^i))"
have [measurable]: "⋀i. F i ∈ borel_measurable M" unfolding F_def by auto
have *: "integrable M (F i)" for i unfolding F_def
by (subst comp_def, rule Tn_integral_preserving(1)[OF assms, of i])
(* Chain of a.e. equalities: n * E(f|I) = sum of E(f|I) = sum of E(F i|I)
   = E(sum of F i|I) = E(birkhoff_sum f n|I). *)
have "AE x in M. n * real_cond_exp M Invariants f x = (∑i∈{..<n}. real_cond_exp M Invariants f x)" by auto
moreover have "AE x in M. (∑i∈{..<n}. real_cond_exp M Invariants f x) = (∑i∈{..<n}. real_cond_exp M Invariants (F i) x)"
apply (rule AE_symmetric[OF AE_equal_sum]) unfolding F_def using Invariants_of_foTn[OF assms] by simp
moreover have "AE x in M. (∑i∈{..<n}. real_cond_exp M Invariants (F i) x) = real_cond_exp M Invariants (λx. ∑i∈{..<n}. F i x) x"
by (rule AE_symmetric[OF real_cond_exp_sum [OF *]])
moreover have "AE x in M. real_cond_exp M Invariants (λx. ∑i∈{..<n}. F i x) x = real_cond_exp M Invariants (birkhoff_sum f n) x"
apply (rule real_cond_exp_cong) unfolding F_def using birkhoff_sum_def[symmetric] by auto
ultimately show ?thesis by auto
qed
end
subsection ‹Birkhoff theorem›
subsubsection ‹Almost everywhere version of Birkhoff theorem›
text ‹This paragraph is devoted to the proof of Birkhoff theorem, arguably
the most fundamental result of ergodic theory.
This theorem asserts that Birkhoff averages of an integrable function $f$ converge almost surely,
to the conditional expectation of $f$ with respect to the invariant sigma algebra.
This result implies for instance the strong law of large numbers (in probability theory).
There are numerous proofs of this statement, but none is really easy. We follow the very efficient
argument given in Katok-Hasselblatt. To help the reader, here is the same proof informally. The
first part of the proof is formalized in \verb+birkhoff_lemma1+, the second one in
\verb+birkhoff_lemma+, and the conclusion in \verb+birkhoff_theorem+.
Start with an integrable function $g$. Let $G_n(x) = \max_{k\leq n} S_k g(x)$. Then $\limsup S_n g/n
\leq 0$ outside of $A$, the set where $G_n$ tends to infinity. Moreover, $G_{n+1} - G_n \circ T$ is
bounded by $g$, and tends to $g$ on $A$. It follows from the dominated convergence theorem that
$\int_A G_{n+1} - G_n \circ T \to \int_A g$. As $\int_A G_{n+1} - G_n \circ T = \int_A G_{n+1} - G_n
\geq 0$, we obtain $\int_A g \geq 0$.
Apply now this result to the function $g = f - E(f | I) - \epsilon$, where $\epsilon>0$ is fixed.
Then $\int_A g = -\epsilon \mu(A)$, so that $\mu(A) = 0$. Thus, almost surely, $\limsup S_n
g/n\leq 0$, i.e., $\limsup S_n f/n \leq E(f|I)+\epsilon$. Letting $\epsilon$ tend to $0$ gives
$\limsup S_n f/n \leq E(f|I)$.
Applying the same result to $-f$ gives the corresponding lower bound $\liminf S_n f/n \geq E(f|I)$, hence $S_n f/n \to E(f|I)$.
›
context fmpt
begin
(* First step of the Katok-Hasselblatt proof of Birkhoff's theorem: letting
   A be the set where the Birkhoff sums of f have limsup +∞, A is invariant
   and the integral of f over A is nonnegative. The key device is the running
   maximum F n x = max over k ≤ n of the Birkhoff sums S_k f(x). *)
lemma birkhoff_aux1:
fixes f::"'a ⇒ real"
assumes [measurable]: "integrable M f"
defines "A ≡ {x ∈ space M. limsup (λn. ereal(birkhoff_sum f n x)) = ∞}"
shows "A ∈ sets Invariants" "(∫x. f x * indicator A x ∂M) ≥ 0"
proof -
let ?bsf = "birkhoff_sum f"
have [measurable]: "A ∈ sets M" unfolding A_def by simp
(* A is invariant: S_{n+1} f(x) = f x + S_n f(T x), so the limsups at x and
   T x are +∞ simultaneously. *)
have Ainv: "x ∈ A ⟷ T x ∈ A" if "x ∈ space M" for x
proof -
have "ereal(?bsf (1 + n) x) = ereal(f x) + ereal(?bsf n (T x))" for n
unfolding birkhoff_sum_cocycle birkhoff_sum_1 by simp
moreover have "limsup (λn. ereal(f x) + ereal(?bsf n (T x)))
= ereal(f x) + limsup(λn. ereal(?bsf n (T x)))"
by (rule ereal_limsup_lim_add, auto)
moreover have "limsup (λn. ereal(?bsf (n+1) x)) = limsup (λn. ereal(?bsf n x))" using limsup_shift by simp
ultimately have "limsup (λn. ereal(birkhoff_sum f n x)) = ereal(f x) + limsup (λn. ereal(?bsf n (T x)))" by simp
then have "limsup (λn. ereal(?bsf n x)) = ∞ ⟷ limsup (λn. ereal(?bsf n (T x))) = ∞" by simp
then show "x ∈ A ⟷ T x ∈ A" using ‹x ∈ space M› A_def by simp
qed
then show "A ∈ sets Invariants" using assms(2) Invariants_sets by auto
(* F n x = max of S_0 f(x), ..., S_n f(x); note F n x ≥ S_0 f(x) = 0. *)
define F where "F = (λn x. MAX k ∈{0..n}. ?bsf k x)"
have [measurable]: "⋀n. F n ∈ borel_measurable M" unfolding F_def by measurable
have intFn: "integrable M (F n)" for n
unfolding F_def by (rule integrable_MAX, auto simp add: birkhoff_sum_integral(1)[OF assms(1)])
(* Recurrence: F (n+1) x - F n (T x) = max (-F n (T x)) (f x), obtained by
   splitting the max over {0..n+1} into the k = 0 term and the rest. *)
have Frec: "F (n+1) x - F n (T x) = max (-F n (T x)) (f x)" for n x
proof -
have "{0..n+1} = {0} ∪ {1..n+1}" by auto
then have "(λk. ?bsf k x) ` {0..n+1} = (λk. ?bsf k x) ` {0} ∪ (λk. ?bsf k x) ` {1..n+1}" by blast
then have *: "(λk. ?bsf k x) ` {0..n+1} = {0} ∪ (λk. ?bsf k x) ` {1..n+1}" using birkhoff_sum_1(1) by simp
have b: "F (n+1) x = max (Max {0}) (MAX k ∈{1..n+1}. ?bsf k x)"
by (subst F_def, subst *, rule Max.union, auto)
have "(λk. ?bsf k x) ` {1..n+1} = (λk. ?bsf (1+k) x) ` {0..n}" using Suc_le_D by fastforce
also have "... = (λk. f x + ?bsf k (T x)) ` {0..n}"
by (subst birkhoff_sum_cocycle, subst birkhoff_sum_1(2), auto)
finally have c: "F (n+1) x = max 0 (MAX k ∈{0..n}. ?bsf k (T x) + f x)" using b by (simp add: add_ac)
have "{f x + birkhoff_sum f k (T x) |k. k ∈{0..n}} = (+) (f x) ` {birkhoff_sum f k (T x) |k. k ∈{0..n}}" by blast
have "(MAX k ∈{0..n}. ?bsf k (T x) + f x) = (MAX k ∈{0..n}. ?bsf k (T x)) + f x"
by (rule Max_add_commute) auto
also have "... = F n (T x) + f x" unfolding F_def by simp
finally have "(MAX k ∈{0..n}. ?bsf k (T x) + f x) = f x + F n (T x)" by simp
then have "F (n+1) x = max 0 (f x + F n (T x))" using c by simp
then show "F (n+1) x - F n (T x) = max (-F n (T x)) (f x)" by auto
qed
(* Domination: the increments, restricted to A, are bounded by |f|. *)
have a: "abs((F (n+1) x - F n (T x)) * indicator A x) ≤ abs(f x)" for n x
proof -
have "F (n+1) x -F n (T x) ≥ f x" using Frec by simp
then have *: "F (n+1) x -F n (T x) ≥ - abs(f x)" by simp
have "F n (T x) ≥ birkhoff_sum f 0 (T x)"
unfolding F_def apply (rule Max_ge, simp) using atLeastAtMost_iff by blast
then have "F n (T x) ≥ 0" using birkhoff_sum_1(1) by simp
then have "-F n (T x) ≤ abs (f x)" by simp
moreover have "f x ≤ abs(f x)" by simp
ultimately have "F (n+1) x -F n (T x) ≤ abs(f x)" using Frec by simp
then have "abs(F (n+1) x - F n (T x)) ≤ abs(f x)" using * by simp
then show "abs((F (n+1) x - F n (T x)) * indicator A x) ≤ abs(f x)" unfolding indicator_def by auto
qed
(* Pointwise convergence: on A the sums are unbounded, so eventually
   F n (T x) ≥ -f x and the increment equals f x; off A the product is 0. *)
have b: "(λn. (F (n+1) x - F n (T x)) * indicator A x) ⇢ f x * indicator A x" for x
proof (rule tendsto_eventually, cases)
assume "x ∈ A"
then have "T x ∈ A" using Ainv A_def by auto
then have "limsup (λn. ereal(birkhoff_sum f n (T x))) > ereal(-f x)" unfolding A_def by simp
then obtain N where "ereal(?bsf N (T x)) > ereal(-f x)" using Limsup_obtain by blast
then have *: "?bsf N (T x) > -f x" by simp
{
fix n assume "n≥N"
then have "?bsf N (T x) ∈ (λk. ?bsf k (T x)) ` {0..n}" by auto
then have "F n (T x) ≥ ?bsf N (T x)" unfolding F_def by simp
then have "F n (T x) ≥ -f x" using * by simp
then have "max (-F n (T x)) (f x) = f x" by simp
then have "F (n+1) x - F n (T x) = f x" using Frec by simp
then have "(F (n+1) x - F n (T x)) * indicator A x = f x * indicator A x" by simp
}
then show "eventually (λn. (F (n+1) x - F n (T x)) * indicator A x = f x * indicator A x) sequentially"
using eventually_sequentially by blast
next
assume "¬(x ∈ A)"
then have "indicator A x = (0::real)" by simp
then show "eventually (λn. (F (n+1) x - F n (T x)) * indicator A x = f x * indicator A x) sequentially" by auto
qed
(* Dominated convergence gives convergence of the integrals. *)
have lim: "(λn. (∫x. (F (n+1) x - F n (T x)) * indicator A x ∂M)) ⇢ (∫x. f x * indicator A x ∂M)"
proof (rule integral_dominated_convergence[where ?w = "(λx. abs(f x))"])
show "integrable M (λx. ¦f x¦)" using assms(1) by auto
show "AE x in M. (λn. (F (n + 1) x - F n (T x)) * indicator A x) ⇢ f x * indicator A x" using b by auto
show "⋀n. AE x in M. norm ((F (n + 1) x - F n (T x)) * indicator A x) ≤ ¦f x¦" using a by auto
qed (simp_all)
(* Each integral is ≥ 0: by measure preservation and invariance of A,
   ∫_A F n o T = ∫_A F n, so the integral equals ∫_A (F (n+1) - F n) ≥ 0
   since F is nondecreasing in n. *)
have "(∫x. (F (n+1) x - F n (T x)) * indicator A x ∂M) ≥ 0" for n
proof -
have "(∫x. F n (T x) * indicator A x ∂M) = (∫x. (λx. F n x * indicator A x) (T x) ∂M)"
by (rule Bochner_Integration.integral_cong, auto simp add: Ainv indicator_def)
also have "... = (∫x. F n x * indicator A x ∂M)"
by (rule T_integral_preserving, auto simp add: intFn integrable_real_mult_indicator)
finally have i: "(∫x. F n (T x) * indicator A x ∂M) = (∫x. F n x * indicator A x ∂M)" by simp
have "(∫x. (F (n+1) x - F n (T x)) * indicator A x ∂M) = (∫x. F (n+1) x * indicator A x - F n (T x) * indicator A x ∂M)"
by (simp add: mult.commute right_diff_distrib)
also have "... = (∫x. F (n+1) x * indicator A x ∂M) - (∫x. F n (T x) * indicator A x ∂M)"
by (rule Bochner_Integration.integral_diff, auto simp add: intFn integrable_real_mult_indicator T_meas T_integral_preserving(1))
also have "... = (∫x. F (n+1) x * indicator A x ∂M) - (∫x. F n x * indicator A x ∂M)"
using i by simp
also have "... = (∫x. F (n+1) x * indicator A x - F n x * indicator A x ∂M)"
by (rule Bochner_Integration.integral_diff[symmetric], auto simp add: intFn integrable_real_mult_indicator)
also have "... = (∫x. (F (n+1) x - F n x) * indicator A x ∂M)"
by (simp add: mult.commute right_diff_distrib)
finally have *: "(∫x. (F (n+1) x - F n (T x)) * indicator A x ∂M) = (∫x. (F (n+1) x - F n x) * indicator A x ∂M)"
by simp
have "F n x ≤ F (n+1) x" for x unfolding F_def by (rule Max_mono, auto)
then have "(F (n+1) x - F n x) * indicator A x ≥ 0" for x by simp
then have "integral⇧L M (λx. 0) ≤ integral⇧L M (λx. (F (n+1) x - F n x) * indicator A x)"
by (auto simp add: intFn integrable_real_mult_indicator intro: integral_mono)
then have "(∫x. (F (n+1) x - F n x) * indicator A x ∂M) ≥ 0" by simp
then show "(∫x. (F (n+1) x - F n (T x)) * indicator A x ∂M) ≥ 0" using * by simp
qed
then show "(∫x. f x * indicator A x ∂M) ≥ 0" using lim by (simp add: LIMSEQ_le_const)
qed
text ‹Upper bound half of Birkhoff's theorem: for an integrable function $f$, the limsup of the
Birkhoff averages is almost surely bounded above by the conditional expectation of $f$ with
respect to the invariant sigma-algebra. The proof applies the previous lemma to
$g = f - E(f|I) - \epsilon$ for each fixed $\epsilon > 0$, and then lets $\epsilon$ tend to $0$.›
lemma birkhoff_aux2:
fixes f::"'a ⇒ real"
assumes [measurable]: "integrable M f"
shows "AE x in M. limsup (λn. ereal(birkhoff_sum f n x / n)) ≤ real_cond_exp M Invariants f x"
proof -
{
(* Step 1: fix ε > 0 and prove the bound with an extra ε on the right-hand side. *)
fix ε assume "ε > (0::real)"
define g where "g = (λx. f x - real_cond_exp M Invariants f x - ε)"
then have intg: "integrable M g" using assms real_cond_exp_int(1) assms by auto
(* A is the (invariant) set where the Birkhoff sums of g are unbounded above;
   the previous lemma gives that the integral of g over A is nonnegative. *)
define A where "A = {x ∈ space M. limsup (λn. ereal(birkhoff_sum g n x)) = ∞}"
have Ag: "A ∈ sets Invariants" "(∫x. g x * indicator A x ∂M) ≥ 0"
unfolding A_def by (rule birkhoff_aux1[where ?f = g, OF intg])+
then have [measurable]: "A ∈ sets M" by (simp add: Invariants_in_sets)
(* Since A is invariant, the integrals of f and of E(f|I) over A coincide. *)
have eq: "(∫x. indicator A x * real_cond_exp M Invariants f x ∂M) = (∫x. indicator A x * f x ∂M)"
proof (rule real_cond_exp_intg[where ?f = "λx. (indicator A x)::real" and ?g = f])
have "(λx. indicator A x * f x) = (λx. f x * indicator A x)" by auto
then show "integrable M (λx. indicator A x * f x)"
using integrable_real_mult_indicator[OF ‹A ∈ sets M› assms] by simp
show "indicator A ∈ borel_measurable Invariants" using ‹A ∈ sets Invariants› by measurable
qed (simp)
(* Hence the integral of g over A equals -ε·μ(A); being nonnegative, μ(A) must be 0. *)
have "0 ≤ (∫x. g x * indicator A x ∂M)" using Ag by simp
also have "... = (∫x. f x * indicator A x - real_cond_exp M Invariants f x * indicator A x - ε * indicator A x ∂M)"
unfolding g_def by (simp add: left_diff_distrib)
also have "... = (∫x. f x * indicator A x ∂M) - (∫x. real_cond_exp M Invariants f x * indicator A x ∂M) - (∫x. ε * indicator A x ∂M)"
using assms real_cond_exp_int(1)[OF assms] integrable_real_mult_indicator[OF ‹A ∈ sets M›]
by (auto simp: simp del: integrable_mult_left_iff)
also have "... = - (∫x. ε * indicator A x ∂M)"
by (auto simp add: eq mult.commute)
also have "... = - ε * measure M A" by auto
finally have "0 ≤ - ε * measure M A" by simp
then have "measure M A = 0" using ‹ε > 0› by (simp add: measure_le_0_iff mult_le_0_iff)
then have "A ∈ null_sets M" by (simp add: emeasure_eq_measure null_setsI)
then have "AE x in M. x ∈ space M - A" by (metis (no_types, lifting) AE_cong Diff_iff AE_not_in)
moreover
{
(* Step 2: outside of A the Birkhoff sums of g are bounded above by some C, which
   translates into the desired ε-relaxed bound on the averages of f. *)
fix x assume "x ∈ space M - A"
then have "limsup (λn. ereal(birkhoff_sum g n x)) < ∞" unfolding A_def by auto
then obtain C where C: "⋀n. birkhoff_sum g n x ≤ C" using limsup_finite_then_bounded by presburger
{
fix n::nat assume "n > 0"
(* Expand the Birkhoff sum of g = f - E(f|I) - ε, using that the Birkhoff sum of the
   invariant function E(f|I) is just n · E(f|I) x, and that of the constant ε is n·ε. *)
have "birkhoff_sum g n x = birkhoff_sum f n x - birkhoff_sum (real_cond_exp M Invariants f) n x - birkhoff_sum (λx. ε) n x"
unfolding g_def using birkhoff_sum_add birkhoff_sum_diff by auto
moreover have "birkhoff_sum (real_cond_exp M Invariants f) n x = n * real_cond_exp M Invariants f x"
using birkhoff_sum_of_invariants using ‹x ∈ space M - A› by auto
moreover have "birkhoff_sum (λx. ε) n x = n * ε" unfolding birkhoff_sum_def by auto
ultimately have "birkhoff_sum g n x = birkhoff_sum f n x - n * real_cond_exp M Invariants f x - n * ε"
by simp
then have "birkhoff_sum f n x = birkhoff_sum g n x + n * real_cond_exp M Invariants f x + n * ε"
by simp
then have "birkhoff_sum f n x / n = birkhoff_sum g n x / n + real_cond_exp M Invariants f x + ε"
using ‹n > 0› by (simp add: field_simps)
then have "birkhoff_sum f n x / n ≤ C/n + real_cond_exp M Invariants f x + ε"
using C[of n] ‹n > 0› by (simp add: divide_right_mono)
then have "ereal(birkhoff_sum f n x / n) ≤ ereal(C/n + real_cond_exp M Invariants f x + ε)"
by simp
}
then have "eventually (λn. ereal(birkhoff_sum f n x / n) ≤ ereal(C/n + real_cond_exp M Invariants f x + ε)) sequentially"
by (simp add: eventually_at_top_dense)
then have b: "limsup (λn. ereal(birkhoff_sum f n x / n)) ≤ limsup (λn. ereal(C/n + real_cond_exp M Invariants f x + ε))"
by (simp add: Limsup_mono)
(* The upper bound converges (C/n → 0), so its limsup equals E(f|I) x + ε. *)
have "(λn. ereal(C*(1/real n) + real_cond_exp M Invariants f x + ε)) ⇢ ereal(C * 0 + real_cond_exp M Invariants f x + ε)"
by (intro tendsto_intros)
then have "limsup (λn. ereal(C/real n + real_cond_exp M Invariants f x + ε)) = real_cond_exp M Invariants f x + ε"
using sequentially_bot tendsto_iff_Liminf_eq_Limsup by force
then have "limsup (λn. ereal(birkhoff_sum f n x / n)) ≤ real_cond_exp M Invariants f x + ε"
using b by simp
}
ultimately have "AE x in M. limsup (λn. ereal(birkhoff_sum f n x / n)) ≤ real_cond_exp M Invariants f x + ε"
by auto
then have "AE x in M. limsup (λn. ereal(birkhoff_sum f n x / n)) ≤ ereal(real_cond_exp M Invariants f x) + ε"
by auto
}
(* Step 3: let ε tend to 0 (along a countable family, handled by the next lemma). *)
then show ?thesis
by (rule AE_upper_bound_inf_ereal)
qed
text ‹Birkhoff's pointwise ergodic theorem, non-ergodic version: the Birkhoff averages of an
integrable function converge almost everywhere to its conditional expectation with respect to
the invariant sigma-algebra. It follows from the limsup upper bound of the previous lemma,
applied both to $f$ and to $-f$.›
theorem birkhoff_theorem_AE_nonergodic:
fixes f::"'a ⇒ real"
assumes "integrable M f"
shows "AE x in M. (λn. birkhoff_sum f n x / n) ⇢ real_cond_exp M Invariants f x"
proof -
{
(* Pointwise argument: if the limsup bound holds for f and for -f at x, and the
   conditional expectation commutes with negation at x, then
   liminf ≥ E(f|I) x ≥ limsup, hence the averages converge to E(f|I) x. *)
fix x assume i: "limsup (λn. ereal(birkhoff_sum f n x /n)) ≤ real_cond_exp M Invariants f x"
and ii: "limsup (λn. ereal(birkhoff_sum (λx. -f x) n x / n)) ≤ real_cond_exp M Invariants (λx. -f x) x"
and iii: "real_cond_exp M Invariants (λx. -f x) x = - real_cond_exp M Invariants f x"
have "⋀n. birkhoff_sum (λx. -f x) n x = - birkhoff_sum f n x"
using birkhoff_sum_cmult[where ?c = "-1" and ?f = f] by auto
then have "⋀n. ereal(birkhoff_sum (λx. -f x) n x / n) = - ereal(birkhoff_sum f n x / n)" by auto
moreover have "limsup (λn. - ereal(birkhoff_sum f n x / n)) = - liminf (λn. ereal(birkhoff_sum f n x /n))"
by (rule ereal_Limsup_uminus)
ultimately have "-liminf (λn. ereal(birkhoff_sum f n x /n)) = limsup (λn. ereal(birkhoff_sum (λx. -f x) n x / n))"
by simp
then have "-liminf (λn. ereal(birkhoff_sum f n x /n)) ≤ - real_cond_exp M Invariants f x"
using ii iii by simp
then have "liminf (λn. ereal(birkhoff_sum f n x /n)) ≥ real_cond_exp M Invariants f x"
by (simp add: ereal_uminus_le_reorder)
then have "(λn. birkhoff_sum f n x /n) ⇢ real_cond_exp M Invariants f x"
using i by (simp add: limsup_le_liminf_real)
} note * = this
(* The three pointwise hypotheses each hold almost everywhere, by the previous lemma
   (for f and -f) and by linearity of the conditional expectation. *)
moreover have "AE x in M. limsup (λn. ereal(birkhoff_sum f n x /n)) ≤ real_cond_exp M Invariants f x"
using birkhoff_aux2 assms by simp
moreover have "AE x in M. limsup (λn. ereal(birkhoff_sum (λx. -f x) n x / n)) ≤ real_cond_exp M Invariants (λx. -f x) x"
using birkhoff_aux2 assms by simp
moreover have "AE x in M. real_cond_exp M Invariants (λx. -f x) x = - real_cond_exp M Invariants f x"
using real_cond_exp_cmult[where ?c = "-1"] assms by force
ultimately show ?thesis by auto
qed
text ‹If a function $f$ is integrable, then $E(f\circ T - f | I) = E(f\circ T | I) - E(f|I) = 0$.
Hence, $S_n(f \circ T - f) / n$ converges almost everywhere to $0$, i.e., $f(T^n x)/n \to 0$.
It is remarkable (and sometimes useful) that this holds under the weaker condition that
$f\circ T - f$ is integrable (but not necessarily $f$), where this naive argument fails.
The reason is that the Birkhoff sum of $f \circ T - f$ is $f\circ T^n - f$. If $n$ is such that $x$
and $T^n(x)$ belong to a set where $f$ is bounded, it follows that this Birkhoff sum is also
bounded. Along such a sequence of times, $S_n(f\circ T - f)/n$ tends to $0$.
By the Poincare recurrence theorem, there are such times for almost every point. As it also converges
to $E(f \circ T - f | I)$, it follows that this function is almost everywhere $0$. Then
$f (T^n x)/n = S_n(f\circ T - f)/n + f(x)/n$ tends almost surely to $E(f\circ T-f |I) = 0$.
›
lemma limit_foTn_over_n:
fixes f::"'a ⇒ real"
assumes [measurable]: "f ∈ borel_measurable M"
and "integrable M (λx. f(T x) - f x)"
shows "AE x in M. real_cond_exp M Invariants (λx. f(T x) - f x) x = 0"
"AE x in M. (λn. f((T^^n) x) / n) ⇢ 0"
proof -
(* E k is the set where |f| ≤ k; these sets exhaust the space. *)
define E::"nat ⇒ 'a set" where "E k = {x ∈ space M. ¦f x¦ ≤ k}" for k
have [measurable]: "E k ∈ sets M" for k unfolding E_def by auto
have *: "(⋃k. E k) = space M" unfolding E_def by (auto simp add: real_arch_simple)
(* F k is the set of points of E k returning infinitely often to E k; by Poincare
   recurrence, almost every point of E k belongs to F k, so a.e. x is in some F k. *)
define F::"nat ⇒ 'a set" where "F k = recurrent_subset_infty (E k)" for k
have [measurable]: "F k ∈ sets M" for k unfolding F_def by auto
have **: "E k - F k ∈ null_sets M" for k unfolding F_def using Poincare_recurrence_thm by auto
have "space M - (⋃k. F k) ∈ null_sets M"
apply (rule null_sets_subset[of "(⋃k. E k - F k)"]) unfolding *[symmetric] using ** by auto
with AE_not_in[OF this] have "AE x in M. x ∈ (⋃k. F k)" by auto
(* Birkhoff's theorem for the coboundary f∘T - f, whose Birkhoff sum telescopes. *)
moreover have "AE x in M. (λn. birkhoff_sum (λx. f(T x) - f x) n x / n)
⇢ real_cond_exp M Invariants (λx. f(T x) - f x) x"
by (rule birkhoff_theorem_AE_nonergodic[OF assms(2)])
moreover have "real_cond_exp M Invariants (λx. f(T x) - f x) x = 0 ∧ (λn. f((T^^n) x) / n) ⇢ 0"
if H: "(λn. birkhoff_sum (λx. f(T x) - f x) n x / n) ⇢ real_cond_exp M Invariants (λx. f(T x) - f x) x"
"x ∈ (⋃k. F k)" for x
proof -
(* The Birkhoff sum of f∘T - f telescopes to f∘T^n - f, so f(T^n x)/n converges to
   the conditional expectation at x. *)
have "f((T^^n) x) = birkhoff_sum (λx. f(T x) - f x) n x + f x" for n
unfolding birkhoff_sum_def by (induction n, auto)
then have "f((T^^n) x) / n = birkhoff_sum (λx. f(T x) - f x) n x / n + f x * (1/n)" for n
by (auto simp add: divide_simps)
moreover have "(λn. birkhoff_sum (λx. f(T x) - f x) n x / n + f x * (1/n)) ⇢ real_cond_exp M Invariants (λx. f(T x) - f x) x + f x * 0"
by (intro tendsto_intros H(1))
ultimately have lim: "(λn. f((T^^n) x) / n) ⇢ real_cond_exp M Invariants (λx. f(T x) - f x) x"
by auto
(* Along the infinitely many return times r n to E k, we have |f(T^(r n) x)| ≤ k,
   so f(T^(r n) x) / r n → 0; by uniqueness of limits the limit above must be 0. *)
obtain k where "x ∈ F k" using H(2) by auto
then have "infinite {n. (T^^n) x ∈ E k}"
unfolding F_def recurrent_subset_infty_inf_returns by auto
with infinite_enumerate[OF this] obtain r :: "nat ⇒ nat"
where r: "strict_mono r" "⋀n. r n ∈ {n. (T^^n) x ∈ E k}"
by auto
have A: "(λn. k * (1/r n)) ⇢ real k * 0"
apply (intro tendsto_intros)
using LIMSEQ_subseq_LIMSEQ[OF lim_1_over_n ‹strict_mono r›] unfolding comp_def by auto
have B: "¦f((T^^(r n)) x) / r n¦ ≤ k / (r n)" for n
using r(2) unfolding E_def by (auto simp add: divide_simps)
have "(λn. f((T^^(r n)) x) / r n) ⇢ 0"
apply (rule tendsto_rabs_zero_cancel, rule tendsto_sandwich[of "λn. 0" _ _ "λn. k * (1/r n)"])
using A B by auto
moreover have "(λn. f((T^^(r n)) x) / r n) ⇢ real_cond_exp M Invariants (λx. f(T x) - f x) x"
using LIMSEQ_subseq_LIMSEQ[OF lim ‹strict_mono r›] unfolding comp_def by auto
ultimately have *: "real_cond_exp M Invariants (λx. f(T x) - f x) x = 0"
using LIMSEQ_unique by auto
then have "(λn. f((T^^n) x) / n) ⇢ 0" using lim by auto
then show ?thesis using * by auto
qed
ultimately show "AE x in M. real_cond_exp M Invariants (λx. f(T x) - f x) x = 0"
"AE x in M. (λn. f((T^^n) x) / n) ⇢ 0"
by auto
qed
text ‹We specialize the previous statement to the case where $f$ itself is integrable.›
lemma limit_foTn_over_n':
fixes f::"'a ⇒ real"
assumes [measurable]: "integrable M f"
shows "AE x in M. (λn. f((T^^n) x) / n) ⇢ 0"
(* Direct consequence of the previous lemma: when f itself is integrable, so is
   f∘T - f, since T preserves the integral. *)
by (rule limit_foTn_over_n, simp, rule Bochner_Integration.integrable_diff)
(auto intro: assms T_integral_preserving(1))
text ‹It is often useful to show that a function is cohomologous to a nicer function, i.e., to
prove that a given $f$ can be written as $f = g + u - u \circ T$ where $g$ is nicer than $f$. We
show below that any integrable function is cohomologous to a function which is arbitrarily close to
$E(f|I)$. This is an improved version of Lemma 2.1 in [Benoist-Quint, Annals of maths, 2011]. Note
that the function $g$ to which $f$ is cohomologous is very nice (and, in particular, integrable),
but the transfer function is only measurable in this argument. The fact that the control on
conditional expectation is nevertheless preserved throughout the argument follows from
Lemma~\verb+limit_foTn_over_n+ above.›
text ‹We start with the lemma (and the proof) of [BQ2011]. It shows that, if a function has a
conditional expectation with respect to invariants which is positive, then it is cohomologous to a
nonnegative function. The argument is the clever remark that $g = \max (0, \inf_n S_n f)$ and $u =
\min (0, \inf_n S_n f)$ work (where these expressions are well defined as $S_n f$ tends to infinity
thanks to our assumption).›
lemma cohomologous_approx_cond_exp_aux:
fixes f::"'a ⇒ real"
assumes [measurable]: "integrable M f"
and "AE x in M. real_cond_exp M Invariants f x > 0"
shows "∃u g. u ∈ borel_measurable M ∧ (integrable M g) ∧ (AE x in M. g x ≥ 0 ∧ g x ≤ max 0 (f x)) ∧ (∀x. f x = g x + u x - u (T x))"
proof -
(* The explicit candidates of [BQ2011]: h is the infimum of the Birkhoff sums S_n f for
   n ≥ 1 (well defined a.e. since S_n f → ∞ by the positivity assumption and Birkhoff's
   theorem), u is its negative part, and g the resulting coboundary correction of f. *)
define h::"'a ⇒ real" where "h = (λx. (INF n∈{1..}. birkhoff_sum f n x))"
define u where "u = (λx. min (h x) 0)"
define g where "g = (λx. f x - u x + u (T x))"
have [measurable]: "h ∈ borel_measurable M" "u ∈ borel_measurable M" "g ∈ borel_measurable M"
unfolding g_def h_def u_def by auto
(* The cohomology identity holds everywhere by construction. *)
have "f x = g x + u x - u (T x)" for x unfolding g_def by auto
{
(* Pointwise analysis at a point where Birkhoff's theorem holds with positive limit. *)
fix x assume H: "real_cond_exp M Invariants f x > 0"
"(λn. birkhoff_sum f n x / n) ⇢ real_cond_exp M Invariants f x"
(* Since S_n f / n converges to a positive value, S_n f tends to ∞, hence the Birkhoff
   sums are bounded below and all the infima used below are well defined. *)
have "eventually (λn. ereal(birkhoff_sum f n x / n) * ereal n = ereal(birkhoff_sum f n x)) sequentially"
unfolding eventually_sequentially by (rule exI[of _ 1], auto)
moreover have "(λn. ereal(birkhoff_sum f n x / n) * ereal n) ⇢ ereal(real_cond_exp M Invariants f x) * ∞"
apply (intro tendsto_intros) using H by auto
ultimately have "(λn. ereal(birkhoff_sum f n x)) ⇢ ereal(real_cond_exp M Invariants f x) * ∞"
by (blast intro: Lim_transform_eventually)
then have "(λn. ereal(birkhoff_sum f n x)) ⇢ ∞"
using H by auto
then have B: "∃C. ∀n. C ≤ birkhoff_sum f n x"
by (intro liminf_finite_then_bounded_below, simp add: liminf_PInfty)
have "h x ≤ f x"
unfolding h_def apply (rule cInf_lower) using B by force+
(* Cocycle identity: the sums at T x are the sums at x from time 2 on, shifted by -f x. *)
have "{birkhoff_sum f n (T x) |n. n ∈ {1..}} = {birkhoff_sum f (1+n) (x) - f x |n. n ∈ {1..}}"
unfolding birkhoff_sum_cocycle by auto
also have "... = {birkhoff_sum f n x - f x |n. n ∈ {2..}}"
by (metis (no_types, hide_lams) Suc_1 Suc_eq_plus1_left Suc_le_D Suc_le_mono atLeast_iff)
finally have *: "{birkhoff_sum f n (T x) |n. n ∈ {1..}} = (λt. t - (f x))`{birkhoff_sum f n x |n. n ∈ {2..}}"
by auto
have "h(T x) = Inf {birkhoff_sum f n (T x) |n. n ∈ {1..}}"
unfolding h_def by (metis Setcompr_eq_image)
also have "... = (⨅t∈{birkhoff_sum f n x |n. n ∈ {2..}}. t - f x)"
by (simp only: *)
also have "... = (λt. t - (f x)) (Inf {birkhoff_sum f n x |n. n ∈ {2..}})"
using B by (auto intro!: monoI bijI mono_bij_cInf [symmetric])
finally have I: "Inf {birkhoff_sum f n x |n. n ∈ {2..}} = f x + h (T x)" by auto
(* Splitting the infimum over n ≥ 1 into n = 1 and n ≥ 2 gives the key identity
   max 0 (h x) = f x + u (T x) - u x, i.e. g x = max 0 (h x), which is in [0, max 0 (f x)]. *)
have "max 0 (h x) + u x = h x"
unfolding u_def by auto
also have "... = Inf {birkhoff_sum f n x |n. n ∈ {1..}}"
unfolding h_def by (metis Setcompr_eq_image)
also have "... = Inf ({birkhoff_sum f n x |n. n ∈ {1}} ∪ {birkhoff_sum f n x |n. n ∈ {2..}})"
by (auto intro!: arg_cong[of _ _ Inf], metis One_nat_def Suc_1 antisym birkhoff_sum_1(2) not_less_eq_eq, force)
also have "Inf ({birkhoff_sum f n x |n. n ∈ {1}} ∪ {birkhoff_sum f n x |n. n ∈ {2..}})
= min (Inf {birkhoff_sum f n x |n. n ∈ {1}}) (Inf {birkhoff_sum f n x |n. n ∈ {2..}})"
unfolding inf_min[symmetric] apply (intro cInf_union_distrib) using B by auto
also have "... = min (f x) (f x + h (T x))" using I by auto
also have "... = f x + u (T x)" unfolding u_def by auto
finally have "max 0 (h x) = f x + u (T x) - u x" by auto
then have "g x = max 0 (h x)" unfolding g_def by auto
then have "g x ≥ 0 ∧ g x ≤ max 0 (f x)" using ‹h x ≤ f x› by auto
}
(* The pointwise hypotheses hold almost everywhere, by assumption and Birkhoff's theorem. *)
then have *: "AE x in M. g x ≥ 0 ∧ g x ≤ max 0 (f x)"
using assms(2) birkhoff_theorem_AE_nonergodic[OF assms(1)] by auto
moreover have "integrable M g"
apply (rule Bochner_Integration.integrable_bound[of _ f]) using * by (auto simp add: assms)
ultimately have "u ∈ borel_measurable M ∧ integrable M g ∧ (AE x in M. 0 ≤ g x ∧ g x ≤ max 0 (f x)) ∧ (∀x. f x = g x + u x - u (T x))"
using ‹⋀x. f x = g x + u x - u (T x)› ‹u ∈ borel_measurable M› by auto
then show ?thesis by blast
qed
text ‹To deduce the stronger version that $f$ is cohomologous to an arbitrarily good approximation
of $E(f|I)$, we apply the previous lemma twice, to control successively the negative and the
positive side. The sign control in the conclusion of the previous lemma implies that the second step
does not spoil the first one.›
lemma cohomologous_approx_cond_exp:
fixes f::"'a ⇒ real" and B::"'a ⇒ real"
assumes [measurable]: "integrable M f" "B ∈ borel_measurable M"
and "AE x in M. B x > 0"
shows "∃g u. u ∈ borel_measurable M
∧ integrable M g
∧ (∀x. f x = g x + u x - u (T x))
∧ (AE x in M. abs(g x - real_cond_exp M Invariants f x) ≤ B x)"
proof -
(* Replace B by C = min(B, 1): same a.e. positivity, but integrable, so its conditional
   expectation is defined and a.e. positive. *)
define C where "C = (λx. min (B x) 1)"
have [measurable]: "integrable M C"
apply (rule Bochner_Integration.integrable_bound[of _ "λ_. (1::real)"], auto)
unfolding C_def using assms(3) by auto
have "C x ≤ B x" for x unfolding C_def by auto
have "AE x in M. C x > 0" unfolding C_def using assms(3) by auto
have AECI: "AE x in M. real_cond_exp M Invariants C x > 0"
by (intro real_cond_exp_gr_c ‹integrable M C› ‹AE x in M. C x > 0›)
(* f1 = f - E(f|I) has vanishing conditional expectation, since E(E(f|I)|I) = E(f|I). *)
define f1 where "f1 = (λx. f x - real_cond_exp M Invariants f x)"
have "integrable M f1"
unfolding f1_def by (intro Bochner_Integration.integrable_diff ‹integrable M f› real_cond_exp_int(1))
have "AE x in M. real_cond_exp M Invariants f1 x = real_cond_exp M Invariants f x - real_cond_exp M Invariants (real_cond_exp M Invariants f) x"
unfolding f1_def apply (rule real_cond_exp_diff) by (intro Bochner_Integration.integrable_diff
‹integrable M f› ‹integrable M C› real_cond_exp_int(1))+
moreover have "AE x in M. real_cond_exp M Invariants (real_cond_exp M Invariants f) x = real_cond_exp M Invariants f x"
by (intro real_cond_exp_nested_subalg subalg ‹integrable M f›, auto)
ultimately have AEf1: "AE x in M. real_cond_exp M Invariants f1 x = 0" by auto
(* First application of the auxiliary lemma, to f1 + C (which has a.e. positive
   conditional expectation): it yields f2 cohomologous to f1 with f2 ≥ -C a.e. *)
have A [measurable]: "integrable M (λx. f1 x + C x)"
by (intro Bochner_Integration.integrable_add ‹integrable M f1› ‹integrable M C›)
have "AE x in M. real_cond_exp M Invariants (λx. f1 x + C x) x = real_cond_exp M Invariants f1 x + real_cond_exp M Invariants C x"
by (intro real_cond_exp_add ‹integrable M f1› ‹integrable M C›)
then have B: "AE x in M. real_cond_exp M Invariants (λx. f1 x + C x) x > 0"
using AECI AEf1 by auto
obtain u2 g2 where H2: "u2 ∈ borel_measurable M" "integrable M g2" "AE x in M. g2 x ≥ 0 ∧ g2 x ≤ max 0 (f1 x + C x)" "⋀x. f1 x + C x = g2 x + u2 x - u2 (T x)"
using cohomologous_approx_cond_exp_aux[OF A B] by blast
define f2 where "f2 = (λx. (g2 x - C x))"
have *: "u2(T x) - u2 x = f2 x -f1 x" for x unfolding f2_def using H2(4)[of x] by auto
have "AE x in M. f2 x ≥ - C x" using H2(3) unfolding f2_def by auto
have "integrable M f2"
unfolding f2_def by (intro Bochner_Integration.integrable_diff ‹integrable M g2› ‹integrable M C›)
(* Although u2 is only measurable, the coboundary u2∘T - u2 equals the integrable
   function f2 - f1, so limit_foTn_over_n shows its conditional expectation vanishes;
   hence E(f2|I) = E(f1|I) = 0 a.e. *)
have "AE x in M. real_cond_exp M Invariants (λx. u2(T x) - u2 x) x = 0"
proof (rule limit_foTn_over_n)
show "integrable M (λx. u2(T x) - u2 x)"
unfolding * by (intro Bochner_Integration.integrable_diff ‹integrable M f1› ‹integrable M f2›)
qed (simp add: ‹u2 ∈ borel_measurable M›)
then have "AE x in M. real_cond_exp M Invariants (λx. f2 x - f1 x) x = 0"
unfolding * by simp
moreover have "AE x in M. real_cond_exp M Invariants (λx. f2 x - f1 x) x = real_cond_exp M Invariants f2 x - real_cond_exp M Invariants f1 x"
by (intro real_cond_exp_diff ‹integrable M f2› ‹integrable M f1›)
ultimately have AEf2: "AE x in M. real_cond_exp M Invariants f2 x = 0"
using AEf1 by auto
(* Second application, to C - f2: it yields f3 cohomologous to f2 with, this time,
   the two-sided bound |f3| ≤ C a.e. (the lower bound f2 ≥ -C is not spoiled). *)
have A [measurable]: "integrable M (λx. C x - f2 x)"
by (intro Bochner_Integration.integrable_diff ‹integrable M f2› ‹integrable M C›)
have "AE x in M. real_cond_exp M Invariants (λx. C x - f2 x) x = real_cond_exp M Invariants C x - real_cond_exp M Invariants f2 x"
by (intro real_cond_exp_diff ‹integrable M f2› ‹integrable M C›)
then have B: "AE x in M. real_cond_exp M Invariants (λx. C x - f2 x) x > 0"
using AECI AEf2 by auto
obtain u3 g3 where H3: "u3 ∈ borel_measurable M" "integrable M g3" "AE x in M. g3 x ≥ 0 ∧ g3 x ≤ max 0 (C x - f2 x)" "⋀x. C x - f2 x = g3 x + u3 x - u3 (T x)"
using cohomologous_approx_cond_exp_aux[OF A B] by blast
define f3 where "f3 = (λx. C x - g3 x)"
have "AE x in M. f3 x ≥ min (C x) (f2 x)" unfolding f3_def using H3(3) by auto
then have "AE x in M. f3 x ≥ -C x" using ‹AE x in M. f2 x ≥ - C x› ‹AE x in M. C x > 0› by auto
moreover have "AE x in M. f3 x ≤ C x" unfolding f3_def using H3(3) by auto
ultimately have "AE x in M. abs(f3 x) ≤ C x" by auto
then have *: "AE x in M. abs(f3 x) ≤ B x" using order_trans[OF _ ‹⋀x. C x ≤ B x›] by auto
(* Conclusion: g = f3 + E(f|I) is within B of E(f|I), and f = g + u - u∘T with
   the combined transfer function u = u2 - u3. *)
define g where "g = (λx. f3 x + real_cond_exp M Invariants f x)"
define u where "u = (λx. u2 x - u3 x)"
have "AE x in M. abs (g x - real_cond_exp M Invariants f x) ≤ B x"
unfolding g_def using * by auto
moreover have "f x = g x + u x - u(T x)" for x
using H3(4)[of x] H2(4)[of x] unfolding u_def g_def f3_def f2_def f1_def by auto
moreover have "u ∈ borel_measurable M"
unfolding u_def using ‹u2 ∈ borel_measurable M› ‹u3 ∈ borel_measurable M› by auto
moreover have "integrable M g"
unfolding g_def f3_def by (intro Bochner_Integration.integrable_add Bochner_Integration.integrable_diff
‹integrable M C› ‹integrable M g3› ‹integrable M f› real_cond_exp_int(1))
ultimately show ?thesis by auto
qed
subsubsection ‹$L^1$ version of Birkhoff theorem›
text ‹The $L^1$ convergence in Birkhoff theorem follows from the almost everywhere convergence and
general considerations on $L^1$ convergence (Scheffe's lemma) explained
in \verb+AE_and_int_bound_implies_L1_conv2+.
This argument works neatly for nonnegative functions, the general case reduces to this one by taking
the positive and negative parts of a given function.
One could also prove it by truncation: for bounded functions, the $L^1$ convergence follows
from the boundedness and almost sure convergence. The general case follows by density, but it
is a little bit tedious to write as one needs to make sure that the conditional expectation
of the truncation converges to the conditional expectation of the original function. This is true
in $L^1$ as the conditional expectation is a contraction in $L^1$, it follows almost everywhere
after taking a subsequence. All in all, the argument based on Scheffe's lemma seems more
economical.›
text ‹$L^1$ convergence in Birkhoff's theorem for nonnegative functions, via Scheffe's lemma:
almost everywhere convergence together with the bound
$\int |S_n f / n| \leq \int |E(f|I)|$ implies $L^1$ convergence.›
lemma birkhoff_lemma_L1:
fixes f::"'a ⇒ real"
assumes "⋀x. f x ≥ 0"
and [measurable]: "integrable M f"
shows "(λn. ∫⇧+x. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) ∂M) ⇢ 0"
proof (rule Scheffe_lemma2)
show i: "integrable M (real_cond_exp M Invariants f)" using assms by (simp add: real_cond_exp_int(1))
(* The almost everywhere convergence is Birkhoff's pointwise theorem. *)
show "AE x in M. (λn. birkhoff_sum f n x / real n) ⇢ real_cond_exp M Invariants f x"
using birkhoff_theorem_AE_nonergodic assms by simp
fix n
have [measurable]: "(λx. ennreal ¦birkhoff_sum f n x¦) ∈ borel_measurable M" by measurable
show [measurable]: "(λx. birkhoff_sum f n x / real n) ∈ borel_measurable M" by measurable
(* Since f ≥ 0, so is its conditional expectation a.e., hence
   ∫⁺|E(f|I)| = ∫ E(f|I) = ∫ f = ∫⁺|f|. *)
have "AE x in M. real_cond_exp M Invariants f x ≥ 0" using assms(1) real_cond_exp_pos by simp
then have *: "AE x in M. norm (real_cond_exp M Invariants f x) = real_cond_exp M Invariants f x" by auto
have **: "(∫ x. norm (real_cond_exp M Invariants f x) ∂M) = (∫ x. real_cond_exp M Invariants f x ∂M)"
apply (rule integral_cong_AE) using * by auto
have "(∫⇧+x. ennreal (norm (real_cond_exp M Invariants f x)) ∂M) = (∫ x. norm (real_cond_exp M Invariants f x) ∂M)"
by (rule nn_integral_eq_integral) (auto simp add: i)
also have "... = (∫ x. real_cond_exp M Invariants f x ∂M)"
using ** by simp
also have "... = (∫ x. f x ∂M)"
using real_cond_exp_int(2) assms(2) by auto
also have "... = (∫x. norm(f x) ∂M)" using assms by auto
also have "... = (∫⇧+x. norm(f x) ∂M)"
by (rule nn_integral_eq_integral[symmetric], auto simp add: assms(2))
finally have eq: "(∫⇧+ x. norm (real_cond_exp M Invariants f x) ∂M) = (∫⇧+ x. norm(f x) ∂M)" by simp
(* By subadditivity and measure invariance, ∫⁺|S_n f| ≤ n · ∫⁺|f| = n · ∫⁺|E(f|I)|. *)
{
fix x
have "norm(birkhoff_sum f n x) ≤ birkhoff_sum (λx. norm(f x)) n x"
using birkhoff_sum_abs by fastforce
then have "norm(birkhoff_sum f n x) ≤ birkhoff_sum (λx. ennreal(norm(f x))) n x"
unfolding birkhoff_sum_def by auto
}
then have "(∫⇧+x. norm(birkhoff_sum f n x) ∂M) ≤ (∫⇧+x. birkhoff_sum (λx. ennreal(norm(f x))) n x ∂M)"
by (simp add: nn_integral_mono)
also have "... = n * (∫⇧+x. norm(f x) ∂M)"
by (rule birkhoff_sum_nn_integral, auto)
also have "... = n * (∫⇧+ x. norm (real_cond_exp M Invariants f x) ∂M)"
using eq by simp
finally have *: "(∫⇧+x. norm(birkhoff_sum f n x) ∂M) ≤ n * (∫⇧+ x. norm (real_cond_exp M Invariants f x) ∂M)"
by simp
(* Divide by n (ennreal arithmetic; the case n = 0 is trivial as S_0 f / 0 = 0). *)
show "(∫⇧+ x. ennreal (norm (birkhoff_sum f n x / real n)) ∂M) ≤ (∫⇧+ x. norm (real_cond_exp M Invariants f x) ∂M)"
proof (cases)
assume "n = 0"
then show ?thesis by auto
next
assume "¬(n = 0)"
then have "n > 0" by simp
then have "1/ennreal(real n) ≥ 0" by simp
have "(∫⇧+ x. ennreal (norm (birkhoff_sum f n x / real n)) ∂M) = (∫⇧+ x. ennreal (norm (birkhoff_sum f n x)) / ennreal(real n) ∂M)"
using ‹n > 0› by (auto simp: divide_ennreal)
also have "... = (∫⇧+ x. (1/ennreal(real n)) * ennreal (norm (birkhoff_sum f n x)) ∂M)"
by (simp add: ‹0 < n› divide_ennreal_def mult.commute)
also have "... = (1/ennreal(real n) * (∫⇧+ x. ennreal (norm (birkhoff_sum f n x)) ∂M))"
by (subst nn_integral_cmult) auto
also have "... ≤ (1/ennreal(real n)) * (ennreal(real n) * (∫⇧+ x. norm (real_cond_exp M Invariants f x) ∂M))"
using * by (intro mult_mono) (auto simp: ennreal_of_nat_eq_real_of_nat)
also have "... = (∫⇧+ x. norm (real_cond_exp M Invariants f x) ∂M)"
using ‹n > 0›
by (auto simp del: ennreal_1 simp add: ennreal_1[symmetric] divide_ennreal ennreal_mult[symmetric] mult.assoc[symmetric])
simp
finally show ?thesis by simp
qed
qed
text ‹$L^1$ version of Birkhoff's theorem for a general integrable function: decompose $f$
into its positive and negative parts, apply the nonnegative case to each, and conclude by a
sandwich argument.›
theorem birkhoff_theorem_L1_nonergodic:
fixes f::"'a ⇒ real"
assumes [measurable]: "integrable M f"
shows "(λn. ∫⇧+x. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) ∂M) ⇢ 0"
proof -
(* Positive part g and negative part h, both nonnegative and integrable, with f = g - h. *)
define g where "g = (λx. max (f x) 0)"
have g_int [measurable]: "integrable M g" unfolding g_def using assms by auto
define h where "h = (λx. max (-f x) 0)"
have h_int [measurable]: "integrable M h" unfolding h_def using assms by auto
have "f = (λx. g x - h x)" unfolding g_def h_def by auto
{
(* For each n > 0, bound the error for f by the sum of the errors for g and h,
   using linearity of Birkhoff sums and of the conditional expectation. *)
fix n::nat assume "n > 0"
have "⋀x. birkhoff_sum f n x = birkhoff_sum g n x - birkhoff_sum h n x" using birkhoff_sum_diff ‹f = (λx. g x - h x)› by auto
then have "⋀x. birkhoff_sum f n x / n = birkhoff_sum g n x / n - birkhoff_sum h n x / n" using ‹n > 0› by (simp add: diff_divide_distrib)
moreover have "AE x in M. real_cond_exp M Invariants g x - real_cond_exp M Invariants h x = real_cond_exp M Invariants f x"
using AE_symmetric[OF real_cond_exp_diff] g_int h_int ‹f = (λx. g x - h x)› by auto
ultimately have "AE x in M. birkhoff_sum f n x / n - real_cond_exp M Invariants f x =
(birkhoff_sum g n x / n - real_cond_exp M Invariants g x) - (birkhoff_sum h n x / n - real_cond_exp M Invariants h x)"
by auto
then have *: "AE x in M. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) ≤
norm(birkhoff_sum g n x / n - real_cond_exp M Invariants g x) + norm(birkhoff_sum h n x / n - real_cond_exp M Invariants h x)"
by auto
have "(∫⇧+ x. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) ∂M) ≤
(∫⇧+ x. ennreal(norm(birkhoff_sum g n x / n - real_cond_exp M Invariants g x)) + norm(birkhoff_sum h n x / n - real_cond_exp M Invariants h x) ∂M)"
apply (rule nn_integral_mono_AE) using * by (simp add: ennreal_plus[symmetric] del: ennreal_plus)
also have "... = (∫⇧+ x. norm(birkhoff_sum g n x / n - real_cond_exp M Invariants g x) ∂M) + (∫⇧+ x. norm(birkhoff_sum h n x / n - real_cond_exp M Invariants h x) ∂M)"
apply (rule nn_integral_add) apply auto using real_cond_exp_F_meas borel_measurable_cond_exp2 by measurable
finally have "(∫⇧+ x. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) ∂M) ≤
(∫⇧+ x. norm(birkhoff_sum g n x / n - real_cond_exp M Invariants g x) ∂M) + (∫⇧+ x. norm(birkhoff_sum h n x / n - real_cond_exp M Invariants h x) ∂M)"
by simp
}
then have *: "eventually (λn. (∫⇧+ x. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) ∂M) ≤
(∫⇧+ x. norm(birkhoff_sum g n x / n - real_cond_exp M Invariants g x) ∂M) + (∫⇧+ x. norm(birkhoff_sum h n x / n - real_cond_exp M Invariants h x) ∂M))
sequentially"
using eventually_at_top_dense by auto
have **: "eventually (λn. (∫⇧+ x. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) ∂M) ≥ 0) sequentially"
by simp
(* Apply the nonnegative case to g and h, then sandwich between 0 and a null sequence. *)
have "(λn. (∫⇧+ x. norm(birkhoff_sum g n x / n - real_cond_exp M Invariants g x) ∂M)) ⇢ 0"
apply (rule birkhoff_lemma_L1, auto simp add: g_int) unfolding g_def by auto
moreover have "(λn. (∫⇧+ x. norm(birkhoff_sum h n x / n - real_cond_exp M Invariants h x) ∂M)) ⇢ 0"
apply (rule birkhoff_lemma_L1, auto simp add: h_int) unfolding h_def by auto
ultimately have "(λn. (∫⇧+ x. norm(birkhoff_sum g n x / n - real_cond_exp M Invariants g x) ∂M) + (∫⇧+ x. norm(birkhoff_sum h n x / n - real_cond_exp M Invariants h x) ∂M)) ⇢ 0"
using tendsto_add[of _ 0 _ _ 0] by auto
then show ?thesis
using tendsto_sandwich[OF ** *] by auto
qed
subsubsection ‹Conservativity of skew products›
text ‹The behaviour of skew-products of the form $(x, y) \mapsto (Tx, y + f x)$ is directly related
to Birkhoff theorem, as the iterates involve the Birkhoff sums in the fiber. Birkhoff theorem
implies that such a skew product is conservative when the function $f$ has vanishing conditional
expectation.
To prove the theorem, assume by contradiction that a set $A$ with positive measure does not
intersect its preimages. Replacing $A$ with a smaller set $C$, we can assume that $C$ is bounded in
the $y$-direction, by a constant $N$, and also that all its nonempty vertical fibers, above the
projection $Cx$, have a measure bounded from below. Then, by Birkhoff theorem, for any $r>0$, most
of the first $n$ preimages of $C$ are contained in the set $\{|y| \leq r n+N\}$, of measure $O(r
n)$. Hence, they cannot be disjoint if $r < \mu(C)$. To make this argument rigorous, one should
only consider the preimages whose $x$-component belongs to a set $Dx$ where the Birkhoff sums are
small. This condition has a positive measure if $\mu(Cx) + \mu(Dx) > \mu(M)$, which one can ensure
by taking $Dx$ large enough.›
(* Conservativity of the skew product TS(x,y) = (T x, y + f x) on M ⨂⇩M lborel, for an
   integrable f whose conditional expectation on the invariant sigma-algebra vanishes a.e.
   Strategy: given A with positive measure, find k > 0 with (TS^^k)-`A ∩ A ≠ {}.  Birkhoff's
   theorem makes the fiber coordinate grow sublinearly (at rate < r), so the sets D n below
   (preimages of a positive-measure piece of A, restricted to a cylinder of bounded height)
   cannot all be disjoint: their total measure would exceed that of the cylinder. *)
theorem (in fmpt) skew_product_conservative:
fixes f::"'a ⇒ real"
assumes [measurable]: "integrable M f"
and "AE x in M. real_cond_exp M Invariants f x = 0"
shows "conservative_mpt (M ⨂⇩M lborel) (λ(x,y). (T x, y + f x))"
proof (rule conservative_mptI)
let ?TS = "(λ(x,y). (T x, y + f x))"
let ?MS = "M ⨂⇩M (lborel::real measure)"
have f_meas [measurable]: "f ∈ borel_measurable M" by auto
(* The skew product is measure preserving, by mpt_skew_product_real. *)
have "mpt M T" by (simp add: mpt_axioms)
with mpt_skew_product_real[OF this f_meas] show "mpt ?MS ?TS" by simp
then interpret TS: mpt ?MS ?TS by auto
fix A::"('a × real) set"
assume A1 [measurable]: "A ∈ sets ?MS" and A2: "emeasure ?MS A > 0"
(* Step 1: restrict A to a piece of positive measure with second coordinate bounded by
   some N, obtained by writing A as a countable increasing union of such pieces. *)
have "A = (⋃N::nat. A ∩ {(x,y). abs(y) ≤ N})" by (auto simp add: real_arch_simple)
then have *: "emeasure ?MS (⋃N::nat. A ∩ {(x,y). abs(y) ≤ N}) > 0"
using A2 by simp
have "space ?MS = space M × space (lborel::real measure)" using space_pair_measure by auto
then have A_inc: "A ⊆ space M × space (lborel::real measure)" using sets.sets_into_space[OF A1] by auto
{
fix N::nat
have "{(x, y). abs(y) ≤ real N ∧ x ∈ space M} = space M × {-(real N)..(real N)}" by auto
then have "{(x, y). ¦y¦ ≤ real N ∧ x ∈ space M} ∈ sets ?MS" by auto
then have "A ∩ {(x, y). ¦y¦ ≤ real N ∧ x ∈ space M} ∈ sets ?MS" using A1 by auto
moreover have "A ∩ {(x,y). abs(y) ≤ real N} = A ∩ {(x, y). ¦y¦ ≤ real N ∧ x ∈ space M}"
using A_inc by blast
ultimately have "A ∩ {(x,y). abs(y) ≤ real N} ∈ sets ?MS" by auto
}
then have [measurable]: "⋀N::nat. A ∩ {(x, y). ¦y¦ ≤ real N} ∈ sets (M ⨂⇩M borel)" by auto
have "∃N::nat. emeasure ?MS (A ∩ {(x,y). abs(y) ≤ N}) > 0"
apply (rule emeasure_pos_unionE) using * by auto
then obtain N::nat where N: "emeasure ?MS (A ∩ {(x,y). abs(y) ≤ N}) > 0"
by auto
(* B is the bounded-height piece of A we will work with from now on. *)
define B where "B = A ∩ {(x,y). abs(y) ≤ N}"
have B_meas [measurable]: "B ∈ sets (M ⨂⇩M lborel)" unfolding B_def by auto
have "0 < emeasure (M ⨂⇩M lborel) B" unfolding B_def using N by auto
also have "... = (∫⇧+x. emeasure lborel (Pair x -` B) ∂M)"
apply (rule sigma_finite_measure.emeasure_pair_measure_alt)
using B_meas by (auto simp add: lborel.sigma_finite_measure_axioms)
finally have *: "(∫⇧+x. emeasure lborel (Pair x -` B) ∂M) > 0" by simp
(* Step 2: find a base set Cx of positive measure over which all fibers of B have
   lborel-measure at least e, for some e > 0. *)
have "∃Cx∈sets M. ∃e::real>0. emeasure M Cx > 0 ∧ (∀x ∈ Cx. emeasure lborel (Pair x -` B) ≥ e)"
by (rule not_AE_zero_int_ennreal_E, auto simp add: *)
then obtain Cx e where [measurable]: "Cx ∈ sets M" and Cxe: "e>(0::real)" "emeasure M Cx > 0" "⋀x. x ∈ Cx ⟹ emeasure lborel (Pair x -` B) ≥ e"
by blast
define C where "C = B ∩ (Cx × (UNIV::real set))"
have C_meas [measurable]: "C ∈ sets (M ⨂⇩M lborel)" unfolding C_def using B_meas by auto
have Cx_fibers: "⋀x. x ∈ Cx ⟹ emeasure lborel (Pair x -` C) ≥ e" using Cxe(3) C_def by auto
define c where "c = (measure M Cx)/2"
have "c > 0" unfolding c_def using Cxe(2) by (simp add: emeasure_eq_measure)
text ‹We will apply Birkhoff theorem to show that most preimages of $C$ at time $n$ are contained in a cylinder
of height roughly $r n$, for some suitably small $r$. How small $r$ should be to get a
contradiction can be determined at the end of the proof. It turns out that the good condition
is the following one -- this is by no means obvious now.›
define r where "r = (if measure M (space M) = 0 then 1 else e * c / (4 * measure M (space M)))"
have "r > 0" using ‹e > 0› ‹c > 0› unfolding r_def
apply auto using measure_le_0_iff by fastforce
have pos: "e*c-2*r*measure M (space M) > 0" using ‹e > 0› ‹c > 0› unfolding r_def by auto
(* Step 3: by Birkhoff's theorem and the vanishing conditional expectation, a.e. x has
   birkhoff_sum f n x / n ⇢ 0, hence ¦birkhoff_sum f n x¦ ≤ r * n for n large. *)
define Bgood where "Bgood = {x ∈ space M. (λn. birkhoff_sum f n x / n) ⇢ 0}"
have [measurable]: "Bgood ∈ sets M" unfolding Bgood_def by auto
have *: "AE x in M. x ∈ Bgood" unfolding Bgood_def using birkhoff_theorem_AE_nonergodic[OF assms(1)] assms(2) by auto
then have "emeasure M Bgood = emeasure M (space M)"
by (intro emeasure_eq_AE) auto
{
fix x assume "x ∈ Bgood"
then have "x ∈ space M" unfolding Bgood_def by auto
have "(λn. birkhoff_sum f n x / n) ⇢ 0" using ‹x ∈ Bgood› unfolding Bgood_def by auto
moreover have "0 ∈ {-r<..<r}" "open {-r<..<r}" using ‹r>0› by auto
ultimately have "eventually (λn. birkhoff_sum f n x / n ∈ {-r<..<r}) sequentially"
using topological_tendstoD by blast
then obtain n0 where n0: "n0>0" "⋀n. n ≥ n0 ⟹ birkhoff_sum f n x / n ∈ {-r<..<r}"
using eventually_sequentially by (metis (mono_tags, lifting) le0 le_simps(3) neq0_conv)
{
fix n assume "n ≥ n0"
then have "n>0" using ‹n0>0› by auto
with n0(2)[OF ‹n ≥ n0›] have "abs(birkhoff_sum f n x / n) ≤ r" by auto
then have "abs(birkhoff_sum f n x) ≤ r * n" using ‹n>0› by (simp add: divide_le_eq)
}
then have "x ∈ (⋃n0. {x ∈ space M. ∀n∈{n0..}. abs(birkhoff_sum f n x) ≤ r * n})" using ‹x ∈ space M› by blast
}
then have "AE x in M. x ∉ space M - (⋃n0. {x ∈ space M. ∀n∈{n0..}. abs(birkhoff_sum f n x) ≤ r * n})"
using * by auto
then have eqM: "emeasure M (⋃n0. {x ∈ space M. ∀n∈{n0..}. abs(birkhoff_sum f n x) ≤ r * n}) = emeasure M (space M)"
by (intro emeasure_eq_AE) auto
(* Step 4: by monotone convergence of measures, pick a single threshold n0 such that the
   set Dx of points with this Birkhoff control from time n0 on misses at most measure c. *)
have "(λn0. emeasure M {x ∈ space M. ∀n∈{n0..}. abs(birkhoff_sum f n x) ≤ r * n} + c)
⇢ emeasure M (⋃n0. {x ∈ space M. ∀n∈{n0..}. abs(birkhoff_sum f n x) ≤ r * n}) + c"
by (intro tendsto_intros Lim_emeasure_incseq) (auto simp add: incseq_def)
moreover have "emeasure M (⋃n0. {x ∈ space M. ∀n∈{n0..}. abs(birkhoff_sum f n x) ≤ r * n}) + c > emeasure M (space M)"
using eqM ‹c > 0› emeasure_eq_measure by auto
ultimately have "eventually (λn0. emeasure M {x ∈ space M. ∀n∈{n0..}. abs(birkhoff_sum f n x) ≤ r * n} + c > emeasure M (space M)) sequentially"
unfolding order_tendsto_iff by auto
then obtain n0 where n0: "emeasure M {x ∈ space M. ∀n∈{n0..}. abs(birkhoff_sum f n x) ≤ r * n} + c > emeasure M (space M)"
using eventually_sequentially by auto
define Dx where "Dx = {x ∈ space M. ∀n∈{n0..}. abs(birkhoff_sum f n x) ≤ r * n}"
have Dx_meas [measurable]: "Dx ∈ sets M" unfolding Dx_def by auto
have "emeasure M Dx + c ≥ emeasure M (space M)" using n0 Dx_def by auto
(* Pick n1 large enough that n1 * (e*c - 2*r*measure M (space M)) exceeds the quantity
   needed for the final contradiction; this is where the choice of r pays off. *)
obtain n1::nat where n1: "n1 > max n0 ((measure M (space M) * 2 * N + e*c*n0 - e*c) / (e*c-2*r*measure M (space M)))"
by (metis mult.commute mult.left_neutral numeral_One reals_Archimedean3 zero_less_numeral)
then have "n1 > n0" by auto
have n1_ineq: "n1 * (e*c-2*r*measure M (space M)) > (measure M (space M) * 2 * N + e*c*n0 - e*c)"
using n1 pos by (simp add: pos_divide_less_eq)
(* D n: points of the cylinder Dx × [-r*n1-N, r*n1+N] whose n-th image under TS lies in C. *)
define D where "D = (λn. Dx × {-r*n1-N..r*n1+N} ∩ (?TS^^n)-`C)"
have Dn_meas [measurable]: "D n ∈ sets (M ⨂⇩M lborel)" for n
unfolding D_def apply (rule TS.T_intersec_meas(2)) using C_meas by auto
(* Step 5: each D n with n0 ≤ n ≤ n1 has measure at least e * c, computed fiberwise:
   over each x ∈ Dx ∩ (T^^n)-`Cx, the fiber of D n has lborel-measure ≥ e, and the set
   of such x has measure ≥ c. *)
have "emeasure ?MS (D n) ≥ e * c" if "n ∈ {n0..n1}" for n
proof -
have "n ≥ n0" "n ≤ n1" using that by auto
{
fix x assume [simp]: "x ∈ space M"
define F where "F = {y ∈ {-r*n1-N..r*n1+N}. y + birkhoff_sum f n x ∈ Pair ((T^^n)x) -`C}"
have [measurable]: "F ∈ sets lborel" unfolding F_def by measurable
{
fix y::real
(* The n-th iterate of the skew product translates the fiber by the Birkhoff sum. *)
have "(?TS^^n)(x,y) = ((T^^n)x, y + birkhoff_sum f n x)"
using skew_product_real_iterates by simp
then have "(indicator C ((?TS^^n) (x,y))::ennreal) = indicator Cx ((T^^n)x) * indicator (Pair ((T^^n)x) -`C) (y + birkhoff_sum f n x)"
using C_def by (simp add: indicator_def)
moreover have "(indicator (D n) (x, y)::ennreal) = indicator Dx x * indicator {-r*n1-N..r*n1+N} y * indicator C ((?TS^^n) (x,y))"
unfolding D_def by (simp add: indicator_def)
ultimately have "(indicator (D n) (x, y)::ennreal) = indicator Dx x * indicator {-r*n1-N..r*n1+N} y
* indicator Cx ((T^^n)x) * indicator (Pair ((T^^n)x) -`C) (y + birkhoff_sum f n x)"
by (simp add: mult.assoc)
then have "(indicator (D n) (x, y)::ennreal) = indicator (Dx ∩ (T^^n)-`Cx) x * indicator F y"
unfolding F_def by (simp add: indicator_def)
}
then have "(∫⇧+y. indicator (D n) (x, y) ∂lborel) = (∫⇧+y. indicator (Dx ∩ (T^^n)-`Cx) x * indicator F y ∂lborel)"
by auto
also have "... = indicator (Dx ∩ (T^^n)-`Cx) x * (∫⇧+y. indicator F y ∂lborel)"
by (rule nn_integral_cmult, auto)
also have "... = indicator (Dx ∩ (T^^n)-`Cx) x * emeasure lborel F" using ‹F ∈ sets lborel› by auto
finally have A: "(∫⇧+y. indicator (D n) (x, y) ∂lborel) = indicator (Dx ∩ (T^^n)-`Cx) x * emeasure lborel F"
by simp
have "(∫⇧+y. indicator (D n) (x, y) ∂lborel) ≥ ennreal e * indicator (Dx ∩ (T^^n)-`Cx) x"
proof (cases)
assume "indicator (Dx ∩ (T^^n)-`Cx) x = (0::ennreal)"
then show ?thesis by auto
next
assume "¬(indicator (Dx ∩ (T^^n)-`Cx) x = (0::ennreal))"
then have "x ∈ Dx ∩ (T^^n)-`Cx" by (simp add: indicator_eq_0_iff)
then have "x ∈ Dx" "(T^^n) x ∈ Cx" by auto
then have "abs(birkhoff_sum f n x) ≤ r * n" using ‹n ∈ {n0..n1}› Dx_def by auto
then have *: "abs(birkhoff_sum f n x) ≤ r * n1" using ‹n ≤ n1› ‹r>0›
by (meson of_nat_le_iff order_trans mult_le_cancel_iff2)
(* The Birkhoff control guarantees that the translated fiber of C stays inside the
   cylinder's height range, so the restriction in F is vacuous. *)
have F_expr: "F = {-r*n1-N..r*n1+N} ∩ (+)(birkhoff_sum f n x) -` (Pair ((T^^n)x) -`C)"
unfolding F_def by (auto simp add: add.commute)
have "(Pair ((T^^n)x) -`C) ⊆ {real_of_int (- int N)..real N}" unfolding C_def B_def by auto
then have "((+)(birkhoff_sum f n x)) -` (Pair ((T^^n)x) -`C) ⊆ {-N-birkhoff_sum f n x..N-birkhoff_sum f n x}"
by auto
also have "... ⊆ {-r * n1 - N .. r * n1 + N}" using * by auto
finally have "F = ((+)(birkhoff_sum f n x)) -` (Pair ((T^^n)x) -`C)" unfolding F_expr by auto
then have "emeasure lborel F = emeasure lborel ((+)(birkhoff_sum f n x) -` (Pair ((T^^n)x) -`C))" by auto
also have "... = emeasure lborel (((+)(birkhoff_sum f n x) -` (Pair ((T^^n)x) -`C)) ∩ space lborel)" by simp
also have "... = emeasure (distr lborel borel ((+) (birkhoff_sum f n x))) (Pair ((T^^n)x) -`C)"
apply (rule emeasure_distr[symmetric]) using C_meas by auto
(* Translation invariance of Lebesgue measure. *)
also have "... = emeasure lborel (Pair ((T^^n)x) -`C)" using lborel_distr_plus[of "birkhoff_sum f n x"] by simp
also have "... ≥ e" using Cx_fibers ‹(T^^n) x ∈ Cx› by auto
finally have "emeasure lborel F ≥ e" by auto
then show ?thesis using A by (simp add: indicator_def)
qed
}
moreover have "emeasure ?MS (D n) = (∫⇧+x. (∫⇧+y. indicator (D n) (x, y) ∂lborel) ∂M)"
using Dn_meas lborel.emeasure_pair_measure by blast
ultimately have "emeasure ?MS (D n) ≥ (∫⇧+x. ennreal e * indicator (Dx ∩ (T ^^ n) -` Cx) x ∂M)"
by (simp add: nn_integral_mono)
also have "(∫⇧+x. ennreal e * indicator (Dx ∩ (T ^^ n) -` Cx) x ∂M) = e * (∫⇧+x. indicator (Dx ∩ (T ^^ n) -` Cx) x ∂M)"
apply (rule nn_integral_cmult) using ‹e>0› by auto
also have "... = ennreal e * emeasure M (Dx ∩ (T ^^ n) -` Cx)" by simp
finally have *: "emeasure ?MS (D n) ≥ ennreal e * emeasure M (Dx ∩ (T ^^ n) -` Cx)" by auto
(* emeasure M (Dx ∩ (T^^n)-`Cx) ≥ c since Dx and (T^^n)--`Cx each nearly fill the space. *)
have "c + emeasure M (space M) ≤ emeasure M Dx + emeasure M Cx"
using ‹emeasure M Dx + c ≥ emeasure M (space M)› unfolding c_def
by (auto simp: emeasure_eq_measure ennreal_plus[symmetric] simp del: ennreal_plus)
also have "... = emeasure M Dx + emeasure M ((T^^n)--`Cx)"
by (simp add: T_vrestr_same_emeasure(2))
also have "... = emeasure M (Dx ∪ ((T^^n)--`Cx)) + emeasure M (Dx ∩ ((T^^n)--`Cx))"
by (rule emeasure_Un_Int, auto)
also have "... ≤ emeasure M (space M) + emeasure M (Dx ∩ ((T^^n)-`Cx))"
proof -
have "emeasure M (Dx ∪ ((T^^n)--`Cx)) ≤ emeasure M (space M)"
by (rule emeasure_mono, auto simp add: sets.sets_into_space)
moreover have "Dx ∩ ((T^^n)--`Cx) = Dx ∩ ((T^^n)-`Cx)"
by (simp add: vrestr_intersec_in_space)
ultimately show ?thesis by (metis add.commute add_left_mono)
qed
finally have "emeasure M (Dx ∩ ((T^^n)-`Cx)) ≥ c" by (simp add: emeasure_eq_measure)
then have "ennreal e * emeasure M (Dx ∩ ((T^^n)-`Cx)) ≥ ennreal e * c" using ‹e > 0›
using mult_left_mono by fastforce
with * show "emeasure ?MS (D n) ≥ e * c"
using ‹0<c› ‹0<e› by (auto simp: ennreal_mult[symmetric])
qed
(* Step 6: the sets D n, n ∈ {n0..n1}, cannot be pairwise disjoint — their total measure
   ≥ (n1-n0+1) * e * c would exceed the measure of the ambient cylinder, by the choice of n1. *)
have "¬(disjoint_family_on D {n0..n1})"
proof
assume D: "disjoint_family_on D {n0..n1}"
have "emeasure lborel {-r*n1-N..r*n1+N} = (r * real n1 + real N) - (- r * real n1 - real N)"
apply (rule emeasure_lborel_Icc) using ‹r>0› by auto
then have *: "emeasure lborel {-r*n1-N..r*n1+N} = ennreal(2 * r * real n1 + 2 * real N)"
by (auto simp: ac_simps)
have "ennreal(e * c) * (real n1 - real n0 + 1) = ennreal(e*c) * card {n0..n1}"
using ‹n1 > n0› by (auto simp: ennreal_of_nat_eq_real_of_nat Suc_diff_le ac_simps of_nat_diff)
also have "... = (∑n∈{n0..n1}. ennreal(e*c))"
by (simp add: ac_simps)
also have "... ≤ (∑n∈{n0..n1}. emeasure ?MS (D n))"
using ‹⋀n. n ∈ {n0..n1} ⟹ emeasure ?MS (D n) ≥ e * c› by (meson sum_mono)
also have "... = emeasure ?MS (⋃n∈{n0..n1}. D n)"
apply (rule sum_emeasure) using Dn_meas by (auto simp add: D)
also have "... ≤ emeasure ?MS (space M × {-r*n1-N..r*n1+N})"
apply (rule emeasure_mono) unfolding D_def using sets.sets_into_space[OF Dx_meas] by auto
also have "... = emeasure M (space M) * emeasure lborel {-r*n1-N..r*n1+N}"
by (rule sigma_finite_measure.emeasure_pair_measure_Times, auto simp add: lborel.sigma_finite_measure_axioms)
also have "... = emeasure M (space M) * ennreal(2 * r * real n1 + 2 * real N)"
using * by auto
finally have "ennreal(e * c) * (real n1- real n0+1) ≤ emeasure M (space M) * ennreal(2 * r * real n1 + 2 * real N)" by simp
then have "e*c * (real n1- real n0 + 1) ≤ measure M (space M) * (2 * r * real n1 + 2 * real N)"
using ‹0<r› ‹0<e› ‹0<c› ‹n0 < n1› emeasure_eq_measure by (auto simp: ennreal_mult'[symmetric] simp del: ennreal_plus)
then have "0 ≤ measure M (space M) * (2 * r * real n1 + 2 * real N) - e*c * (real n1- real n0 + 1)" by auto
also have "... = (measure M (space M) * 2 * N + e*c*n0 - e*c) - n1 * (e*c-2*r*measure M (space M))"
by algebra
finally have "n1 * (e*c-2*r*measure M (space M)) ≤ measure M (space M) * 2 * N + e*c*n0 - e*c"
by linarith
then show False using n1_ineq by auto
qed
(* Step 7: two intersecting D m, D n (m > n) give a common point of (TS^^(n+k))-`A and
   (TS^^n)-`A with k = m-n > 0, hence (TS^^k)-`A meets A, as required for conservativity. *)
then obtain n m where nm: "n<m" "D m ∩ D n ≠ {}" unfolding disjoint_family_on_def by (metis inf_sup_aci(1) linorder_cases)
define k where "k = m-n"
then have "k>0" "D (n+k) ∩ D n ≠ {}" using nm by auto
then have "((?TS^^(n+k))-`A) ∩ ((?TS^^n)-`A) ≠ {}" unfolding D_def C_def B_def by auto
moreover have "((?TS^^(n+k))-`A) ∩ ((?TS^^n)-`A) = (?TS^^n)-`(((?TS^^k)-`A) ∩ A)"
using funpow_add by (simp add: add.commute funpow_add set.compositionality)
ultimately have "((?TS^^k)-`A) ∩ A ≠ {}" by auto
then show "∃k>0. ((?TS^^k)-`A) ∩ A ≠ {}" using ‹k>0› by auto
qed
subsubsection ‹Oscillations around the limit in Birkhoff theorem›
text ‹In this paragraph, we prove that, in Birkhoff theorem with vanishing limit, the Birkhoff sums
are infinitely many times arbitrarily close to $0$, both on the positive and the negative side.
In the ergodic case, this statement implies for instance that if the Birkhoff sums of an integrable
function tend to infinity almost everywhere, then the integral of the function can not vanish, it
has to be strictly positive (while Birkhoff theorem per se does not exclude the convergence to
infinity, at a rate slower than linear). This converts a qualitative information (convergence to
infinity at an unknown rate) to a quantitative information (linear convergence to infinity). This
result (sometimes known as Atkinson's Lemma) has been reinvented many times, for instance by Kesten
and by Guivarc'h. It plays an important role in the study of random products of matrices.
This is essentially a consequence of the conservativity of the corresponding skew-product, proved in
\verb+skew_product_conservative+. Indeed, this implies that, starting from a small set $X \times
[-e/2, e/2]$, the skew-product comes back infinitely often to itself, which implies that the
Birkhoff sums at these times are bounded by $e$.
To show that the Birkhoff sums come back to $[0,e]$ is a little bit more tricky. Argue by
contradiction, and induce on $A \times [0,e/2]$ where $A$ is the set of points where the Birkhoff
sums don't come back to $[0,e]$. Then the second coordinate decreases strictly when one iterates the
skew product, which is not compatible with conservativity.›
(* Key lemma: if the conditional expectation of f on the invariant sigma-algebra vanishes
   a.e., then a.e. x has its Birkhoff sums in [0, e] infinitely often, for any e > 0.
   Proof sketch: suppose not, i.e. for some N no Birkhoff sum from time N on returns to
   [0, e].  Induce the skew product on A = Ax × [0, e/2] (Ax the bad set): since returns
   to [0, e] are forbidden, the fiber coordinate strictly decreases under the induced map,
   contradicting the a.e. invariance forced by conservativity. *)
lemma birkhoff_sum_small_asymp_lemma:
assumes [measurable]: "integrable M f"
and "AE x in M. real_cond_exp M Invariants f x = 0" "e>(0::real)"
shows "AE x in M. infinite {n. birkhoff_sum f n x ∈ {0..e}}"
proof -
have [measurable]: "f ∈ borel_measurable M" by auto
have [measurable]: "⋀N. {x ∈ space M. ∃N. ∀n∈{N..}. birkhoff_sum f n x ∉ {0..e}} ∈ sets M" by auto
{
fix N assume "N>(0::nat)"
(* Ax: points whose Birkhoff sums from time N on never land in [0, e]. *)
define Ax where "Ax = {x ∈ space M. ∀n∈{N..}. birkhoff_sum f n x ∉ {0..e}}"
then have [measurable]: "Ax ∈ sets M" by auto
define A where "A = Ax × {0..e/2}"
then have A_meas [measurable]: "A ∈ sets (M ⨂⇩M lborel)" by auto
(* Work with the N-th power TN = T^^N, so every nonzero return time corresponds to a
   Birkhoff sum at a time ≥ N, which the definition of Ax controls. *)
define TN where "TN = T^^N"
interpret TN: fmpt M TN
unfolding TN_def using fmpt_power by auto
define fN where "fN = birkhoff_sum f N"
(* Birkhoff sums of fN under TN are Birkhoff sums of f at multiples of N. *)
have "TN.birkhoff_sum fN n x = birkhoff_sum f (n*N) x" for n x
proof (induction n)
case 0
then show ?case by auto
next
case (Suc n)
have "TN.birkhoff_sum fN (Suc n) x = TN.birkhoff_sum fN n x + fN ((TN^^n) x)"
using TN.birkhoff_sum_cocycle[of fN n 1] by auto
also have "... = birkhoff_sum f (n*N) x + birkhoff_sum f N ((TN^^n) x)"
using Suc.IH fN_def by auto
also have "... = birkhoff_sum f (n*N+N) x" unfolding TN_def
by (subst funpow_mult, subst mult.commute[of N n], rule birkhoff_sum_cocycle[of f "n*N" N x, symmetric])
finally show ?case by (simp add: add.commute)
qed
then have not0e: "⋀x n. x ∈ Ax ⟹ n ≠ 0 ⟹ TN.birkhoff_sum fN n x ∉ {0..e}" unfolding Ax_def by auto
let ?TS = "(λ(x,y). (T x, y + f x))"
let ?MS = "M ⨂⇩M (lborel::real measure)"
(* The skew product is conservative by the previous theorem; so is its N-th power. *)
interpret TS: conservative_mpt ?MS ?TS
by (rule skew_product_conservative, auto simp add: assms)
let ?TSN = "(λ(x,y). (TN x, y + fN x))"
have *:"?TSN = ?TS^^N" unfolding TN_def fN_def using skew_product_real_iterates by auto
interpret TSN: conservative_mpt ?MS ?TSN
using * TS.conservative_mpt_power by auto
(* Induce ?TSN on A; the induced system TA on MA is again conservative. *)
define MA TA where "MA = restrict_space ?MS A" and "TA = TSN.induced_map A"
interpret TA: conservative_mpt MA TA unfolding MA_def TA_def
by (rule TSN.induced_map_conservative_mpt, measurable)
(* The induced map shifts the fiber coordinate by the Birkhoff sum at the return time. *)
have *: "⋀ x y. snd (TA (x,y)) = snd (x,y) + TN.birkhoff_sum fN (TSN.return_time_function A (x,y)) x"
unfolding TA_def TSN.induced_map_def using TN.skew_product_real_iterates Pair_def by auto
have [measurable]: "snd ∈ borel_measurable ?MS" by auto
then have [measurable]: "snd ∈ borel_measurable MA" unfolding MA_def using measurable_restrict_space1 by blast
have "AE z in MA. z ∈ TSN.recurrent_subset A"
unfolding MA_def using TSN.induced_map_recurrent_typical(1)[OF A_meas].
moreover
{
fix z assume z: "z ∈ TSN.recurrent_subset A"
define x y where "x = fst z" and "y = snd z"
then have "z = (x,y)" by simp
have "z ∈ A" using z "TSN.recurrent_subset_incl"(1) by auto
then have "x ∈ Ax" "y ∈ {0..e/2}" unfolding A_def x_def y_def by auto
define y2 where "y2 = y + TN.birkhoff_sum fN (TSN.return_time_function A (x,y)) x"
have "y2 = snd (TA z)" unfolding y2_def using * ‹z = (x, y)› by force
moreover have "TA z ∈ A" unfolding TA_def using ‹z ∈ A› TSN.induced_map_stabilizes_A by auto
ultimately have "y2 ∈ {0..e/2}" unfolding A_def by auto
have "TSN.return_time_function A (x,y) ≠ 0"
using ‹z = (x,y)› ‹z ∈ TSN.recurrent_subset A› TSN.return_time0[of A] by fast
(* The Birkhoff sum at the (nonzero) return time avoids [0, e] by definition of Ax,
   yet both y and y2 lie in [0, e/2], so it lies in [-e, e]; hence it is in [-e, 0). *)
then have "TN.birkhoff_sum fN (TSN.return_time_function A (x,y)) x ∉ {0..e}"
using not0e[OF ‹x ∈ Ax›] by auto
moreover have "TN.birkhoff_sum fN (TSN.return_time_function A (x,y)) x ∈ {-e..e}"
using ‹y ∈ {0..e/2}› ‹y2 ∈ {0..e/2}› y2_def by auto
ultimately have "TN.birkhoff_sum fN (TSN.return_time_function A (x,y)) x ∈ {-e..<0}"
by auto
then have "y2 < y" using y2_def by auto
then have "snd(TA z) < snd z" unfolding y_def using ‹y2 = snd (TA z)› by auto
}
(* A strictly decreasing a.e.-invariant quantity is impossible: conclude MA is null. *)
ultimately have a: "AE z in MA. snd(TA z) < snd z" by auto
then have "AE z in MA. snd(TA z) ≤ snd z" by auto
then have "AE z in MA. snd(TA z) = snd z" using TA.AE_decreasing_then_invariant[of snd] by auto
then have "AE z in MA. False" using a by auto
then have "space MA ∈ null_sets MA" by (simp add: AE_iff_null_sets)
then have "emeasure MA A = 0" by (metis A_meas MA_def null_setsD1 space_restrict_space2)
then have "emeasure ?MS A = 0" unfolding MA_def
by (metis A_meas emeasure_restrict_space sets.sets_into_space sets.top space_restrict_space space_restrict_space2)
moreover have "emeasure ?MS A = emeasure M Ax * emeasure lborel {0..e/2}"
unfolding A_def by (intro lborel.emeasure_pair_measure_Times) auto
ultimately have "emeasure M {x ∈ space M. ∀n∈{N..}. birkhoff_sum f n x ∉ {0..e}} = 0" using ‹e>0› Ax_def by simp
then have "{x ∈ space M. ∀n∈{N..}. birkhoff_sum f n x ∉ {0..e}} ∈ null_sets M" by auto
}
(* Union over all N > 0 of the bad sets is null, and it contains every point with only
   finitely many returns of the Birkhoff sums to [0, e]. *)
then have "(⋃N∈{0<..}. {x ∈ space M. ∀n∈{N..}. birkhoff_sum f n x ∉ {0..e}}) ∈ null_sets M" by (auto simp: greaterThan_0)
moreover have "{x ∈ space M. ¬(infinite {n. birkhoff_sum f n x ∈ {0..e}})} ⊆ (⋃N∈{0<..}. {x ∈ space M. ∀n∈{N..}. birkhoff_sum f n x ∉ {0..e}})"
proof
fix x assume H: "x ∈ {x ∈ space M. ¬(infinite {n. birkhoff_sum f n x ∈ {0..e}})}"
then have "x ∈ space M" by auto
have *: "finite {n. birkhoff_sum f n x ∈ {0..e}}" using H by auto
then obtain N where "⋀n. n ≥ N ⟹ n ∉ {n. birkhoff_sum f n x ∈ {0..e}}"
by (metis finite_nat_set_iff_bounded not_less)
then have "x ∈ {x ∈ space M. ∀n∈{N+1..}. birkhoff_sum f n x ∉ {0..e}}" using ‹x ∈ space M› by auto
moreover have "N+1>0" by auto
ultimately show "x ∈ (⋃N∈{0<..}. {x ∈ space M. ∀n∈{N..}. birkhoff_sum f n x ∉ {0..e}})" by auto
qed
ultimately show ?thesis unfolding eventually_ae_filter by auto
qed
(* Atkinson-type result, positive side: a.e., the Birkhoff sums of f are infinitely often
   within e above n times the conditional expectation of f on the invariant sigma-algebra.
   Proved by applying the previous lemma to g = f - real_cond_exp M Invariants f, which has
   vanishing conditional expectation. *)
theorem birkhoff_sum_small_asymp_pos_nonergodic:
assumes [measurable]: "integrable M f" and "e > (0::real)"
shows "AE x in M. infinite {n. birkhoff_sum f n x ∈ {n * real_cond_exp M Invariants f x .. n * real_cond_exp M Invariants f x + e}}"
proof -
define g where "g = (λx. f x - real_cond_exp M Invariants f x)"
have g_meas [measurable]: "integrable M g" unfolding g_def using real_cond_exp_int(1)[OF assms(1)] assms(1) by auto
(* Conditional expectation is a projection, so g has vanishing conditional expectation. *)
have "AE x in M. real_cond_exp M Invariants (real_cond_exp M Invariants f) x = real_cond_exp M Invariants f x"
by (rule real_cond_exp_F_meas, auto simp add: real_cond_exp_int(1)[OF assms(1)])
then have *: "AE x in M. real_cond_exp M Invariants g x = 0"
unfolding g_def using real_cond_exp_diff[OF assms(1) real_cond_exp_int(1)[OF assms(1)]] by auto
have "AE x in M. infinite {n. birkhoff_sum g n x ∈ {0..e}}"
by (rule birkhoff_sum_small_asymp_lemma, auto simp add: ‹e>0› * g_meas)
moreover
{
fix x assume "x ∈ space M" "infinite {n. birkhoff_sum g n x ∈ {0..e}}"
{
fix n assume H: "birkhoff_sum g n x ∈ {0..e}"
(* Translate a return of the g-sums to [0, e] into the claimed window for the f-sums,
   using that Birkhoff sums of an invariant function are n times its value. *)
have "birkhoff_sum g n x = birkhoff_sum f n x - birkhoff_sum (real_cond_exp M Invariants f) n x"
unfolding g_def using birkhoff_sum_diff by auto
also have "... = birkhoff_sum f n x - n * real_cond_exp M Invariants f x"
using birkhoff_sum_of_invariants ‹x ∈ space M› by auto
finally have "birkhoff_sum f n x ∈ {n * real_cond_exp M Invariants f x .. n * real_cond_exp M Invariants f x + e}" using H by simp
}
then have "{n. birkhoff_sum g n x ∈ {0..e}} ⊆ {n. birkhoff_sum f n x ∈ {n * real_cond_exp M Invariants f x .. n * real_cond_exp M Invariants f x + e}}"
by auto
then have "infinite {n. birkhoff_sum f n x ∈ {n * real_cond_exp M Invariants f x .. n * real_cond_exp M Invariants f x + e}}"
using ‹infinite {n. birkhoff_sum g n x ∈ {0..e}}› finite_subset by blast
}
ultimately show ?thesis by auto
qed
(* Negative-side counterpart of the previous theorem: a.e., the Birkhoff sums of f are
   infinitely often within e below n times the conditional expectation.  Same proof,
   applied to g = real_cond_exp M Invariants f - f (the sign-flipped difference). *)
theorem birkhoff_sum_small_asymp_neg_nonergodic:
assumes [measurable]: "integrable M f" and "e > (0::real)"
shows "AE x in M. infinite {n. birkhoff_sum f n x ∈ {n * real_cond_exp M Invariants f x - e .. n * real_cond_exp M Invariants f x}}"
proof -
define g where "g = (λx. real_cond_exp M Invariants f x - f x)"
have g_meas [measurable]: "integrable M g" unfolding g_def using real_cond_exp_int(1)[OF assms(1)] assms(1) by auto
(* As before, g has vanishing conditional expectation by the projection property. *)
have "AE x in M. real_cond_exp M Invariants (real_cond_exp M Invariants f) x = real_cond_exp M Invariants f x"
by (rule real_cond_exp_F_meas, auto simp add: real_cond_exp_int(1)[OF assms(1)])
then have *: "AE x in M. real_cond_exp M Invariants g x = 0"
unfolding g_def using real_cond_exp_diff[OF real_cond_exp_int(1)[OF assms(1)] assms(1)] by auto
have "AE x in M. infinite {n. birkhoff_sum g n x ∈ {0..e}}"
by (rule birkhoff_sum_small_asymp_lemma, auto simp add: ‹e>0› * g_meas)
moreover
{
fix x assume "x ∈ space M" "infinite {n. birkhoff_sum g n x ∈ {0..e}}"
{
fix n assume H: "birkhoff_sum g n x ∈ {0..e}"
(* A return of the g-sums to [0, e] means the f-sums lie e-below n times the limit. *)
have "birkhoff_sum g n x = birkhoff_sum (real_cond_exp M Invariants f) n x - birkhoff_sum f n x"
unfolding g_def using birkhoff_sum_diff by auto
also have "... = n * real_cond_exp M Invariants f x - birkhoff_sum f n x"
using birkhoff_sum_of_invariants ‹x ∈ space M› by auto
finally have "birkhoff_sum f n x ∈ {n * real_cond_exp M Invariants f x - e .. n * real_cond_exp M Invariants f x}" using H by simp
}
then have "{n. birkhoff_sum g n x ∈ {0..e}} ⊆ {n. birkhoff_sum f n x ∈ {n * real_cond_exp M Invariants f x - e .. n * real_cond_exp M Invariants f x}}"
by auto
then have "infinite {n. birkhoff_sum f n x ∈ {n * real_cond_exp M Invariants f x - e .. n * real_cond_exp M Invariants f x}}"
using ‹infinite {n. birkhoff_sum g n x ∈ {0..e}}› finite_subset by blast
}
ultimately show ?thesis by auto
qed
subsubsection ‹Conditional expectation for the induced map›
text ‹Thanks to Birkhoff theorem, one can relate conditional expectations with respect to the invariant
sigma algebra, for a map and for a corresponding induced map, as follows.›
(* Relation between conditional expectations (w.r.t. invariant sigma-algebras) for a map
   and for its induced map on A: the conditional expectation of the induced function fA
   equals that of f times that of the return time.  Proof: identify both sides as limits of
   Birkhoff averages (Birkhoff's theorem for T and for the induced map TA), using that the
   Birkhoff sum of fA at time n equals the Birkhoff sum of f at the n-th return time R n. *)
proposition Invariants_cond_exp_induced_map:
fixes f::"'a ⇒ real"
assumes [measurable]: "A ∈ sets M" "integrable M f"
defines "MA ≡ restrict_space M A" and "TA ≡ induced_map A" and "fA ≡ induced_function A f"
shows "AE x in MA. real_cond_exp MA (qmpt.Invariants MA TA) fA x
= real_cond_exp M Invariants f x * real_cond_exp MA (qmpt.Invariants MA TA) (return_time_function A) x"
proof -
interpret A: fmpt MA TA unfolding MA_def TA_def by (rule induced_map_fmpt[OF assms(1)])
(* a: Birkhoff averages of fA under the induced map converge to its conditional expectation. *)
have "integrable M fA" unfolding fA_def using induced_function_integral_nonergodic(1) assms by auto
then have "integrable MA fA" unfolding MA_def
by (metis assms(1) integrable_mult_indicator integrable_restrict_space sets.Int_space_eq2)
then have a: "AE x in MA. (λn. A.birkhoff_sum fA n x / n) ⇢ real_cond_exp MA A.Invariants fA x"
using A.birkhoff_theorem_AE_nonergodic by auto
(* b: Birkhoff averages of f under T converge, a.e. on A as well. *)
have "AE x in M. (λn. birkhoff_sum f n x / n) ⇢ real_cond_exp M Invariants f x"
using birkhoff_theorem_AE_nonergodic assms(2) by auto
then have b: "AE x in MA. (λn. birkhoff_sum f n x / n) ⇢ real_cond_exp M Invariants f x"
unfolding MA_def by (metis (mono_tags, lifting) AE_restrict_space_iff assms(1) eventually_mono sets.Int_space_eq2)
(* c: Birkhoff averages of the return time converge to its conditional expectation. *)
define phiA where "phiA = (λx. return_time_function A x)"
have "integrable M phiA" unfolding phiA_def using return_time_integrable by auto
then have "integrable MA phiA" unfolding MA_def
by (metis assms(1) integrable_mult_indicator integrable_restrict_space sets.Int_space_eq2)
then have c: "AE x in MA. (λn. A.birkhoff_sum (λx. real(phiA x)) n x / n) ⇢ real_cond_exp MA A.Invariants phiA x"
using A.birkhoff_theorem_AE_nonergodic by auto
(* d: that limit is a.e. positive, by Poincaré recurrence (a.e. return time is > 0). *)
have "A-recurrent_subset A ∈ null_sets M" using Poincare_recurrence_thm(1)[OF assms(1)] by auto
then have "A - recurrent_subset A ∈ null_sets MA" unfolding MA_def
by (metis Diff_subset assms(1) emeasure_restrict_space null_setsD1 null_setsD2 null_setsI sets.Int_space_eq2 sets_restrict_space_iff)
then have "AE x in MA. x ∈ recurrent_subset A"
by (simp add: AE_iff_null MA_def null_setsD2 set_diff_eq space_restrict_space2)
moreover have "⋀x. x ∈ recurrent_subset A ⟹ phiA x > 0" unfolding phiA_def using return_time0 by fastforce
ultimately have *: "AE x in MA. phiA x > 0" by auto
have d: "AE x in MA. real_cond_exp MA A.Invariants phiA x > 0"
by (rule A.real_cond_exp_gr_c, auto simp add: * ‹integrable MA phiA›)
(* Pointwise argument at any x where the four a.e. statements a, b, c, d hold. *)
{
fix x
assume A: "(λn. A.birkhoff_sum fA n x / n) ⇢ real_cond_exp MA A.Invariants fA x"
and B: "(λn. birkhoff_sum f n x / n) ⇢ real_cond_exp M Invariants f x"
and C: "(λn. A.birkhoff_sum (λx. real(phiA x)) n x / n) ⇢ real_cond_exp MA A.Invariants phiA x"
and D: "real_cond_exp MA A.Invariants phiA x > 0"
(* R n is the n-th return time (Birkhoff sum of phiA); it tends to infinity linearly. *)
define R where "R = (λn. A.birkhoff_sum phiA n x)"
have D2: "ereal(real_cond_exp MA A.Invariants phiA x) > 0" using D by simp
have "⋀n. real(R n)/ n = A.birkhoff_sum (λx. real(phiA x)) n x / n" unfolding R_def A.birkhoff_sum_def by auto
moreover have "(λn. A.birkhoff_sum (λx. real(phiA x)) n x / n) ⇢ real_cond_exp MA A.Invariants phiA x" using C by auto
ultimately have Rnn: "(λn. real(R n)/n) ⇢ real_cond_exp MA A.Invariants phiA x" by presburger
have "⋀n. ereal(real(R n))/ n = ereal(A.birkhoff_sum (λx. real(phiA x)) n x / n)" unfolding R_def A.birkhoff_sum_def by auto
moreover have "(λn. ereal(A.birkhoff_sum (λx. real(phiA x)) n x / n)) ⇢ real_cond_exp MA A.Invariants phiA x" using C by auto
ultimately have i: "(λn. ereal(real(R n))/n) ⇢ real_cond_exp MA A.Invariants phiA x" by auto
have ii: "(λn. real n) ⇢ ∞" by (rule id_nat_ereal_tendsto_PInf)
have iii: "(λn. ereal(real(R n))/n * real n) ⇢ ∞" using tendsto_mult_ereal_PInf[OF i D2 ii] by simp
have "⋀n. n > 0 ⟹ ereal(real(R n))/n * real n = R n" by auto
then have "eventually (λn. ereal(real(R n))/n * real n = R n) sequentially" using eventually_at_top_dense by auto
then have "(λn. real(R n)) ⇢ ∞" using iii by (simp add: filterlim_cong)
(* The Birkhoff averages of f along the subsequence R n converge to the same limit. *)
then have "(λn. birkhoff_sum f (R n) x / (R n)) ⇢ real_cond_exp M Invariants f x" using limit_along_weak_subseq B by auto
then have l: "(λn. (birkhoff_sum f (R n) x / (R n)) * ((R n)/n)) ⇢ real_cond_exp M Invariants f x * real_cond_exp MA A.Invariants phiA x"
by (rule tendsto_mult, simp add: Rnn)
obtain N where N: "⋀n. n > N ⟹ R n > 0" using ‹(λn. real(R n)) ⇢ ∞›
by (metis (full_types) eventually_at_top_dense filterlim_iff filterlim_weak_subseq)
then have "⋀n. n > N ⟹ (birkhoff_sum f (R n) x / (R n)) * ((R n)/n) = birkhoff_sum f (R n) x / n"
by auto
then have "eventually (λn. (birkhoff_sum f (R n) x / (R n)) * ((R n)/n) = birkhoff_sum f (R n) x / n) sequentially"
by simp
with tendsto_cong[OF this] have main_limit: "(λn. birkhoff_sum f (R n) x / n) ⇢ real_cond_exp M Invariants f x * real_cond_exp MA A.Invariants phiA x"
using l by auto
(* Identify birkhoff_sum f (R n) x with the induced-map Birkhoff sum of fA, and conclude
   by uniqueness of limits. *)
have "⋀n. birkhoff_sum f (R n) x = A.birkhoff_sum fA n x"
unfolding R_def fA_def phiA_def TA_def using induced_function_birkhoff_sum[OF assms(1)] by simp
then have "⋀n. birkhoff_sum f (R n) x /n = A.birkhoff_sum fA n x / n" by simp
then have "(λn. A.birkhoff_sum fA n x / n) ⇢ real_cond_exp M Invariants f x * real_cond_exp MA A.Invariants phiA x"
using main_limit by presburger
then have "real_cond_exp MA A.Invariants fA x = real_cond_exp M Invariants f x * real_cond_exp MA A.Invariants phiA x"
using A LIMSEQ_unique by auto
}
then show ?thesis using a b c d unfolding phiA_def by auto
qed
(* Special case of the previous proposition: if the conditional expectation of f vanishes
   a.e., then so does the conditional expectation of the induced function fA on MA
   (the product formula has a vanishing first factor). *)
corollary Invariants_cond_exp_induced_map_0:
fixes f::"'a ⇒ real"
assumes [measurable]: "A ∈ sets M" "integrable M f" and "AE x in M. real_cond_exp M Invariants f x = 0"
defines "MA ≡ restrict_space M A" and "TA ≡ induced_map A" and "fA ≡ induced_function A f"
shows "AE x in MA. real_cond_exp MA (qmpt.Invariants MA TA) fA x = 0"
proof -
(* Restrict the vanishing hypothesis from M to the subspace MA. *)
have "AE x in MA. real_cond_exp M Invariants f x = 0" unfolding MA_def
apply (subst AE_restrict_space_iff) using assms(3) by auto
then show ?thesis unfolding MA_def TA_def fA_def using Invariants_cond_exp_induced_map[OF assms(1) assms(2)]
by auto
qed
end
end
Theory Ergodicity
section ‹Ergodicity›
theory Ergodicity
imports Invariants
begin
text ‹A transformation is \emph{ergodic} if any invariant set has zero measure or full measure.
Ergodic transformations are, in a sense, extremal among measure preserving transformations.
Hence, any transformation can be seen as an average of ergodic ones. This can be made precise
by the notion of ergodic decomposition, only valid on standard measure spaces.
Many statements get nicer in the ergodic case, hence we will reformulate many
of the previous results in this setting.›
subsection ‹Ergodicity locales›
(* Ergodicity on top of a quasi-measure-preserving map: every invariant set is null or conull. *)
locale ergodic_qmpt = qmpt +
assumes ergodic: "⋀A. A ∈ sets Invariants ⟹ (A ∈ null_sets M ∨ space M - A ∈ null_sets M)"
(* Ergodicity combined with the various measure-preservation / finiteness / conservativity
   assumptions used throughout the development. *)
locale ergodic_mpt = mpt + ergodic_qmpt
locale ergodic_fmpt = ergodic_qmpt + fmpt
locale ergodic_pmpt = ergodic_qmpt + pmpt
locale ergodic_conservative = ergodic_qmpt + conservative
locale ergodic_conservative_mpt = ergodic_qmpt + conservative_mpt
(* Inclusions between the ergodic locales, mirroring those of the underlying locales
   (probability ⟹ finite ⟹ conservative mpt ⟹ conservative). *)
sublocale ergodic_fmpt ⊆ ergodic_mpt
by unfold_locales
sublocale ergodic_pmpt ⊆ ergodic_fmpt
by unfold_locales
sublocale ergodic_fmpt ⊆ ergodic_conservative_mpt
by unfold_locales
sublocale ergodic_conservative_mpt ⊆ ergodic_conservative
by unfold_locales
subsection ‹Behavior of sets in ergodic transformations›
text ‹The main property of an ergodic transformation, essentially equivalent to the definition,
is that a set which is almost invariant under the dynamics is null or conull.›
(* Main consequence of ergodicity: a set that is almost invariant (its symmetric difference
   with its preimage is null) is null or conull.  Proof: replace A by a genuinely invariant
   set B at null symmetric difference, apply the ergodicity assumption to B, and transfer
   nullity back across the null symmetric difference (complementing in the conull case). *)
lemma (in ergodic_qmpt) AE_equal_preimage_then_null_or_conull:
assumes [measurable]: "A ∈ sets M" and "A Δ T--`A ∈ null_sets M"
shows "A ∈ null_sets M ∨ space M - A ∈ null_sets M"
proof -
obtain B where B: "B ∈ sets Invariants" "A Δ B ∈ null_sets M"
by (metis Un_commute Invariants_quasi_Invariants_sets[OF assms(1)] assms(2))
have [measurable]: "B ∈ sets M" using B(1) using Invariants_in_sets by blast
have *: "B ∈ null_sets M ∨ space M - B ∈ null_sets M" using ergodic B(1) by blast
show ?thesis
proof (cases)
assume "B ∈ null_sets M"
then have "A ∈ null_sets M" by (metis Un_commute B(2) Delta_null_of_null_is_null[OF assms(1), where ?A = B])
then show ?thesis by simp
next
assume "¬(B ∈ null_sets M)"
then have i: "space M - B ∈ null_sets M" using * by simp
(* Complementation preserves the symmetric difference, so the conull case transfers too. *)
have "(space M - B) Δ (space M - A) = A Δ B"
using sets.sets_into_space[OF ‹A ∈ sets M›] sets.sets_into_space[OF ‹B ∈ sets M›] by blast
then have "(space M - B) Δ (space M - A) ∈ null_sets M" using B(2) by auto
then have "space M - A ∈ null_sets M"
using Delta_null_of_null_is_null[where ?A = "space M - B" and ?B = "space M - A"] i by auto
then show ?thesis by simp
qed
qed
text ‹The inverse of an ergodic transformation is also ergodic.›
(* Ergodicity passes to the inverse of an invertible qmpt: the invariant
   sigma-algebras of T and Tinv coincide (Invariants_Tinv). *)
lemma (in ergodic_qmpt) ergodic_Tinv:
assumes "invertible_qmpt"
shows "ergodic_qmpt M Tinv"
unfolding ergodic_qmpt_def ergodic_qmpt_axioms_def
proof
show "qmpt M Tinv" using Tinv_qmpt[OF assms] by simp
show "∀A. A ∈ sets (qmpt.Invariants M Tinv) ⟶ A ∈ null_sets M ∨ space M - A ∈ null_sets M"
proof (intro allI impI)
fix A assume "A ∈ sets (qmpt.Invariants M Tinv)"
(* Tinv-invariant sets are exactly the T-invariant sets *)
then have "A ∈ sets Invariants" using Invariants_Tinv[OF assms] by simp
then show "A ∈ null_sets M ∨ space M - A ∈ null_sets M" using ergodic by auto
qed
qed
text ‹In the conservative case, instead of the almost invariance of a set, it suffices to
assume that the preimage is contained in the set, or contains the set, to deduce that it is null
or conull.›
(* In the conservative ergodic case, a one-sided inclusion between A and its
   preimage already forces A to be null or co-null: such a set is
   automatically almost invariant. *)
lemma (in ergodic_conservative) preimage_included_then_null_or_conull:
assumes "A ∈ sets M" "T--`A ⊆ A"
shows "A ∈ null_sets M ∨ space M - A ∈ null_sets M"
proof -
have "A Δ T--`A ∈ null_sets M" using preimage_included_then_almost_invariant[OF assms] by auto
then show ?thesis using AE_equal_preimage_then_null_or_conull assms(1) by auto
qed
(* Same statement with the reverse inclusion. *)
lemma (in ergodic_conservative) preimage_includes_then_null_or_conull:
assumes "A ∈ sets M" "T--`A ⊇ A"
shows "A ∈ null_sets M ∨ space M - A ∈ null_sets M"
proof -
have "A Δ T--`A ∈ null_sets M" using preimage_includes_then_almost_invariant[OF assms] by auto
then show ?thesis using AE_equal_preimage_then_null_or_conull assms(1) by auto
qed
(* For a set A of positive measure, B = ⋃n (T^^n)--`A is co-null:
   B absorbs its preimage, so the dichotomy applies, and B ⊇ A rules out
   the null alternative. *)
lemma (in ergodic_conservative) preimages_conull:
assumes [measurable]: "A ∈ sets M" and "emeasure M A > 0"
shows "space M - (⋃n. (T^^n)--`A) ∈ null_sets M"
"space M Δ (⋃n. (T^^n)--`A) ∈ null_sets M"
proof -
define B where "B = (⋃n. (T^^n)--`A)"
then have [measurable]: "B ∈ sets M" by auto
(* B contains its preimage, so it is null or co-null *)
have "T--`B = (⋃n. (T^^(n+1))--`A)" unfolding B_def using T_vrestr_composed(2) by auto
then have "T--`B ⊆ B" using B_def by blast
then have *: "B ∈ null_sets M ∨ space M - B ∈ null_sets M"
using preimage_included_then_null_or_conull by auto
(* B ⊇ A has positive measure, so B cannot be null *)
have "A ⊆ B" unfolding B_def using T_vrestr_0 assms(1) by blast
then have "emeasure M B > 0" using assms(2)
by (metis ‹B ∈ sets M› emeasure_eq_0 zero_less_iff_neq_zero)
then have "B ∉ null_sets M" by auto
then have i: "space M - B ∈ null_sets M" using * by auto
then show "space M - (⋃n. (T^^n)--`A) ∈ null_sets M" using B_def by auto
have "B ⊆ space M" using sets.sets_into_space[OF ‹B ∈ sets M›] by auto
then have "space M Δ B ∈ null_sets M" using i by (simp add: Diff_mono sup.absorb1)
then show "space M Δ (⋃n. (T^^n)--`A) ∈ null_sets M" using B_def by auto
qed
subsection ‹Behavior of functions in ergodic transformations›
text ‹In the same way that invariant sets are null or conull, invariant functions are almost
everywhere constant in an ergodic transformation. For real functions, one can consider
the set $\{f x \geq d\}$; it has measure $0$ or $1$ depending on $d$.
Then $f$ is almost surely equal to the maximal $d$ such that this set has measure $1$. For functions
taking values in a general space, the argument is essentially the same, replacing intervals by
a basis of the topology.›
(* An Invariants-measurable function into a second-countable T1 space is a.e.
   constant.  Strategy: let Y be the intersection of all basis elements whose
   preimage is co-null; any y ∈ Y works, since for z ≠ y every basis element
   separating z from y has null preimage by ergodicity. *)
lemma (in ergodic_qmpt) Invariant_func_is_AE_constant:
fixes f::"_⇒ 'b::{second_countable_topology, t1_space}"
assumes "f ∈ borel_measurable Invariants"
shows "∃y. AE x in M. f x = y"
proof (cases)
(* degenerate case: the whole space is null, any value works *)
assume "space M ∈ null_sets M"
obtain y::'b where "True" by auto
have "AE x in M. f x = y" using ‹space M ∈ null_sets M› AE_I' by blast
then show ?thesis by auto
next
assume *: "¬(space M ∈ null_sets M)"
obtain B::"'b set set" where B: "countable B" "topological_basis B" using ex_countable_basis by auto
(* C: basis elements whose preimage is co-null *)
define C where "C = {b ∈ B. space M - f-`b ∈ null_sets M}"
then have "countable C" using ‹countable B› by auto
define Y where "Y = ⋂ C"
have "space M - f-`Y = (⋃ b∈ C. space M - f-`b)" unfolding Y_def by auto
moreover have "⋀b. b ∈ C ⟹ space M - f-`b ∈ null_sets M" unfolding C_def by blast
ultimately have i: "space M - f-`Y ∈ null_sets M" using ‹countable C› by (metis null_sets_UN')
then have "f-`Y ≠ {}" using * by auto
then have "Y ≠ {}" by auto
then obtain y where "y ∈ Y" by auto
(* D: basis elements avoiding y with null preimage; they cover UNIV - {y} *)
define D where "D = {b ∈ B. y∉b ∧ f-`b ∩ space M ∈ null_sets M}"
have "countable D" using ‹countable B› D_def by auto
{
fix z assume "z ≠ y"
obtain U where U: "open U" "z ∈ U" "y ∉ U"
using t1_space[OF ‹z ≠ y›] by blast
obtain V where "V ∈ B" "V ⊆ U" "z ∈ V" by (rule topological_basisE[OF ‹topological_basis B› ‹open U› ‹z ∈ U›])
then have "y ∉ V" using U by blast
then have "V ∉ C" using ‹y ∈ Y› Y_def by auto
then have "space M - f-`V ∩ space M ∉ null_sets M" unfolding C_def using ‹V ∈ B›
by (metis (no_types, lifting) Diff_Int2 inf.idem mem_Collect_eq)
moreover have "f-`V ∩ space M ∈ sets Invariants"
using measurable_sets[OF assms borel_open[OF topological_basis_open[OF B(2) ‹V ∈ B›]]] subalgebra_def Invariants_is_subalg by metis
(* by ergodicity, an invariant set that is not co-null must be null *)
ultimately have "f-`V ∩ space M ∈ null_sets M" using ergodic by auto
then have "V ∈ D" unfolding D_def using ‹V ∈ B› ‹y ∉ V› by auto
then have "∃b ∈ D. z ∈ b" using ‹z ∈ V› by auto
}
then have *: "⋃D = UNIV - {y}"
apply auto unfolding D_def by auto
(* hence the complement of f-`{y} is a countable union of null sets *)
have "space M - f-`{y} = f-`(UNIV -{y}) ∩ space M" by blast
also have "... = (⋃b∈D. f-`b ∩ space M)" using * by auto
also have "... ∈ null_sets M" using D_def ‹countable D›
by (metis (no_types, lifting) mem_Collect_eq null_sets_UN')
finally have "space M - f-`{y} ∈ null_sets M" by blast
with AE_not_in[OF this] have "AE x in M. x ∈ f-`{y}" by auto
then show ?thesis by auto
qed
text ‹The same goes for functions which are only almost invariant, as they coincide almost
everywhere with genuine invariant functions.›
(* An a.e.-invariant function coincides a.e. with a genuinely
   Invariants-measurable function, so it is a.e. constant as well. *)
lemma (in ergodic_qmpt) AE_Invariant_func_is_AE_constant:
fixes f::"_ ⇒ 'b::{second_countable_topology, t2_space}"
assumes "f ∈ borel_measurable M" "AE x in M. f(T x) = f x"
shows "∃y. AE x in M. f x = y"
proof -
obtain g where g: "g ∈ borel_measurable Invariants" "AE x in M. f x = g x"
using Invariants_quasi_Invariants_functions[OF assms(1)] assms(2) by auto
then obtain y where y: "AE x in M. g x = y" using Invariant_func_is_AE_constant by auto
have "AE x in M. f x = y" using g(2) y by auto
then show ?thesis by auto
qed
text ‹In conservative systems, it suffices to have an inequality between $f$ and $f \circ T$,
since such a function is almost invariant.›
(* In conservative ergodic systems, a.e. monotonicity along the dynamics
   (f ∘ T ≤ f) already implies a.e. invariance, hence a.e. constancy. *)
lemma (in ergodic_conservative) AE_decreasing_func_is_AE_constant:
fixes f::"_ ⇒ 'b::{linorder_topology, second_countable_topology}"
assumes "AE x in M. f(T x) ≤ f x"
and [measurable]: "f ∈ borel_measurable M"
shows "∃y. AE x in M. f x = y"
proof -
have "AE x in M. f(T x) = f x" using AE_decreasing_then_invariant[OF assms] by auto
then show ?thesis using AE_Invariant_func_is_AE_constant[OF assms(2)] by auto
qed
(* Symmetric statement for a.e. increasing functions. *)
lemma (in ergodic_conservative) AE_increasing_func_is_AE_constant:
fixes f::"_ ⇒ 'b::{linorder_topology, second_countable_topology}"
assumes "AE x in M. f(T x) ≥ f x"
and [measurable]: "f ∈ borel_measurable M"
shows "∃y. AE x in M. f x = y"
proof -
have "AE x in M. f(T x) = f x" using AE_increasing_then_invariant[OF assms] by auto
then show ?thesis using AE_Invariant_func_is_AE_constant[OF assms(2)] by auto
qed
text ‹When the function takes values in a Banach space, the value of the invariant (hence constant)
function can be recovered by integrating the function.›
(* An invariant function with values in a Banach space is integrable (being
   a.e. constant on a finite measure space), and its a.e. value is the
   integral divided by the total mass. *)
lemma (in ergodic_fmpt) Invariant_func_integral:
fixes f::"_ ⇒ 'b::{banach, second_countable_topology}"
assumes [measurable]: "f ∈ borel_measurable Invariants"
shows "integrable M f"
"AE x in M. f x = (∫x. f x ∂M)/⇩R (measure M (space M))"
proof -
have [measurable]: "f ∈ borel_measurable M" using assms Invariants_measurable_func by blast
obtain y where y: "AE x in M. f x = y" using Invariant_func_is_AE_constant[OF assms] by auto
moreover have "integrable M (λx. y)" by auto
ultimately show "integrable M f" by (subst integrable_cong_AE[where ?g = "λx. y"], auto)
(* the integral of the constant y is mass times y *)
have "(∫x. f x ∂M) = (∫x. y ∂M)" by (subst integral_cong_AE[where ?g = "λx. y"], auto simp add: y)
also have "... = measure M (space M) *⇩R y" by auto
finally have *: "(∫x. f x ∂M) = measure M (space M) *⇩R y" by simp
show "AE x in M. f x = (∫x. f x ∂M)/⇩R (measure M (space M))"
proof (cases)
(* if the space is null the claim is vacuously true *)
assume "emeasure M (space M) = 0"
then have "space M ∈ null_sets M" by auto
then show ?thesis using AE_I' by blast
next
assume "¬(emeasure M (space M) = 0)"
then have "measure M (space M) > 0" using emeasure_eq_measure measure_le_0_iff by fastforce
then have "y = (∫x. f x ∂M)/⇩R (measure M (space M))" using * by auto
then show ?thesis using y by auto
qed
qed
text ‹As the conditional expectation of a function and the original function have the same
integral, it follows that the conditional expectation of a function with respect to the
invariant sigma algebra is given by the average of the function.›
(* The conditional expectation w.r.t. Invariants is Invariants-measurable,
   hence a.e. constant; since it has the same integral as f, the constant
   is the average of f. *)
lemma (in ergodic_fmpt) Invariants_cond_exp_is_integral_fmpt:
fixes f::"_ ⇒ real"
assumes "integrable M f"
shows "AE x in M. real_cond_exp M Invariants f x = (∫x. f x ∂M) / measure M (space M)"
proof -
have "AE x in M. real_cond_exp M Invariants f x = (∫x. real_cond_exp M Invariants f x ∂M)/⇩R (measure M (space M))"
by (rule Invariant_func_integral(2), simp add: borel_measurable_cond_exp)
moreover have "(∫x. real_cond_exp M Invariants f x ∂M) = (∫x. f x ∂M)"
by (simp add: assms real_cond_exp_int(2))
ultimately show ?thesis by (simp add: divide_real_def mult.commute)
qed
(* Specialization to probability spaces, where the total mass is 1. *)
lemma (in ergodic_pmpt) Invariants_cond_exp_is_integral:
fixes f::"_ ⇒ real"
assumes "integrable M f"
shows "AE x in M. real_cond_exp M Invariants f x = (∫x. f x ∂M)"
by (metis div_by_1 prob_space Invariants_cond_exp_is_integral_fmpt[OF assms])
subsection ‹Kac formula›
text ‹We reformulate the different versions of Kac formula. They simplify as, for any set $A$
with positive measure, the union $\bigcup T^{-n} A$ (which appears in all these statements)
almost coincides with the whole space.›
(* Ergodic version of local time unboundedness: the recurrence hypothesis of
   local_time_unbounded3 is automatic here since the preimages of B cover
   almost all of the space (preimages_conull). *)
lemma (in ergodic_conservative_mpt) local_time_unbounded:
assumes [measurable]: "A ∈ sets M" "B ∈ sets M"
and "emeasure M A < ∞" "emeasure M B > 0"
shows "(λn. emeasure M {x ∈ (T^^n)--`A. local_time B n x < k}) ⇢ 0"
proof (rule local_time_unbounded3)
have "A - (⋃i. (T ^^ i) --` B) ∈ sets M" by auto
moreover have "A - (⋃i. (T ^^ i) --` B) ⊆ space M - (⋃i. (T ^^ i) --` B)" using sets.sets_into_space[OF assms(1)] by blast
ultimately show "A - (⋃i. (T ^^ i) --` B) ∈ null_sets M" by (metis null_sets_subset preimages_conull(1)[OF assms(2) assms(4)])
show "emeasure M A < ∞" using assms(3) by simp
qed (simp_all add: assms)
(* Kac formula, ergodic version: the integral of the return time to A equals
   the measure of the whole space, since ⋃n (T^^n)--`A is co-null. *)
theorem (in ergodic_conservative_mpt) kac_formula:
assumes [measurable]: "A ∈ sets M" and "emeasure M A > 0"
shows "(∫⇧+y. return_time_function A y ∂M) = emeasure M (space M)"
proof -
have a [measurable]: "(⋃n. (T^^n)--`A) ∈ sets M" by auto
then have "space M = (⋃n. (T^^n)--`A) ∪ (space M - (⋃n. (T^^n)--`A))" using sets.sets_into_space by blast
then have "emeasure M (space M) = emeasure M (⋃n. (T^^n)--`A)"
by (metis a preimages_conull(1)[OF assms] emeasure_Un_null_set)
moreover have "(∫⇧+y. return_time_function A y ∂M) = emeasure M (⋃n. (T^^n)--`A)"
using kac_formula_nonergodic[OF assms(1)] by simp
ultimately show ?thesis by simp
qed
(* The induced function on A has the same integral as f over the whole
   space: the nonergodic statement gives the integral over ⋃n (T^^n)--`A,
   which differs from space M only by a null set (preimages_conull). *)
lemma (in ergodic_conservative_mpt) induced_function_integral:
fixes f::"'a ⇒ real"
assumes [measurable]: "A ∈ sets M" "integrable M f" and "emeasure M A > 0"
shows "integrable M (induced_function A f)"
"(∫y. induced_function A f y ∂M) = (∫ x. f x ∂M)"
proof -
show "integrable M (induced_function A f)"
using induced_function_integral_nonergodic(1)[OF assms(1) assms(2)] by auto
have "(∫y. induced_function A f y ∂M) = (∫ x ∈ (⋃n. (T^^n)--`A). f x ∂M)"
using induced_function_integral_nonergodic(2)[OF assms(1) assms(2)] by auto
also have "... = (∫ x ∈ space M. f x ∂M)"
using set_integral_null_delta[OF assms(2), where ?A = "space M" and ?B = "(⋃n. (T^^n)--`A)"]
preimages_conull(2)[OF assms(1) assms(3)] by auto
also have "... = (∫ x. f x ∂M)" using set_integral_space[OF assms(2)] by auto
finally show "(∫y. induced_function A f y ∂M) = (∫ x. f x ∂M)" by simp
qed
(* Same statement, integrating over the restricted space A instead of M. *)
lemma (in ergodic_conservative_mpt) induced_function_integral_restr:
fixes f::"'a ⇒ real"
assumes [measurable]: "A ∈ sets M" "integrable M f" and "emeasure M A > 0"
shows "integrable (restrict_space M A) (induced_function A f)"
"(∫y. induced_function A f y ∂(restrict_space M A)) = (∫ x. f x ∂M)"
proof -
show "integrable (restrict_space M A) (induced_function A f)"
using induced_function_integral_restr_nonergodic(1)[OF assms(1) assms(2)] by auto
have "(∫y. induced_function A f y ∂(restrict_space M A)) = (∫ x ∈ (⋃n. (T^^n)--`A). f x ∂M)"
using induced_function_integral_restr_nonergodic(2)[OF assms(1) assms(2)] by auto
also have "... = (∫ x ∈ space M. f x ∂M)"
using set_integral_null_delta[OF assms(2), where ?A = "space M" and ?B = "(⋃n. (T^^n)--`A)"]
preimages_conull(2)[OF assms(1) assms(3)] by auto
also have "... = (∫ x. f x ∂M)" using set_integral_space[OF assms(2)] by auto
finally show "(∫y. induced_function A f y ∂(restrict_space M A)) = (∫ x. f x ∂M)" by simp
qed
subsection ‹Birkhoff theorem›
text ‹The general versions of Birkhoff theorem are formulated in terms of conditional expectations.
In ergodic probability measure preserving transformations (the most common setting), they
reduce to simpler versions that we state now, as the conditional expectations are simply the
averages of the functions.›
(* Birkhoff theorem, ergodic version: the a.e. limit of birkhoff_sum f n x / n
   is the average of f, obtained by replacing the conditional expectation in
   the nonergodic statement by the integral. *)
theorem (in ergodic_pmpt) birkhoff_theorem_AE:
fixes f::"'a ⇒ real"
assumes "integrable M f"
shows "AE x in M. (λn. birkhoff_sum f n x / n) ⇢ (∫ x. f x ∂M)"
proof -
have "AE x in M. (λn. birkhoff_sum f n x / n) ⇢ real_cond_exp M Invariants f x"
using birkhoff_theorem_AE_nonergodic[OF assms] by simp
moreover have "AE x in M. real_cond_exp M Invariants f x = (∫ x. f x ∂M)"
using Invariants_cond_exp_is_integral[OF assms] by auto
ultimately show ?thesis by auto
qed
(* L1 version of Birkhoff theorem in the ergodic case: for each n the
   integrand agrees a.e. with the one from the nonergodic statement, since
   the conditional expectation is a.e. the integral. *)
theorem (in ergodic_pmpt) birkhoff_theorem_L1:
fixes f::"'a ⇒ real"
assumes [measurable]: "integrable M f"
shows "(λn. ∫⇧+x. norm(birkhoff_sum f n x / n - (∫ x. f x ∂M)) ∂M) ⇢ 0"
proof -
{
fix n::nat
have "AE x in M. real_cond_exp M Invariants f x = (∫ x. f x ∂M)"
using Invariants_cond_exp_is_integral[OF assms] by auto
then have *: "AE x in M. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x)
= norm(birkhoff_sum f n x / n - (∫ x. f x ∂M))"
by auto
have "(∫⇧+x. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) ∂M)
= (∫⇧+x. norm(birkhoff_sum f n x / n - (∫ x. f x ∂M)) ∂M)"
apply (rule nn_integral_cong_AE) using * by auto
}
moreover have "(λn. ∫⇧+x. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) ∂M) ⇢ 0"
using birkhoff_theorem_L1_nonergodic[OF assms] by auto
ultimately show ?thesis by simp
qed
(* Infinitely many Birkhoff sums fall within e above n times the average
   (ergodic version of the nonergodic asymptotic statement). *)
theorem (in ergodic_pmpt) birkhoff_sum_small_asymp_pos:
fixes f::"'a ⇒ real"
assumes [measurable]: "integrable M f" and "e>0"
shows "AE x in M. infinite {n. birkhoff_sum f n x ∈ {n * (∫x. f x ∂M) .. n * (∫x. f x ∂M) + e}}"
proof -
have "AE x in M. infinite {n. birkhoff_sum f n x ∈ {n * real_cond_exp M Invariants f x .. n * real_cond_exp M Invariants f x + e}}"
using birkhoff_sum_small_asymp_pos_nonergodic[OF assms] by simp
moreover have "AE x in M. real_cond_exp M Invariants f x = (∫x. f x ∂M)"
using Invariants_cond_exp_is_integral[OF assms(1)] by auto
ultimately show ?thesis by auto
qed
(* Symmetric statement, with the window below n times the average. *)
theorem (in ergodic_pmpt) birkhoff_sum_small_asymp_neg:
fixes f::"'a ⇒ real"
assumes [measurable]: "integrable M f" and "e>0"
shows "AE x in M. infinite {n. birkhoff_sum f n x ∈ {n * (∫x. f x ∂M) - e .. n * (∫x. f x ∂M)}}"
proof -
have "AE x in M. infinite {n. birkhoff_sum f n x ∈ {n * real_cond_exp M Invariants f x - e .. n * real_cond_exp M Invariants f x}}"
using birkhoff_sum_small_asymp_neg_nonergodic[OF assms] by simp
moreover have "AE x in M. real_cond_exp M Invariants f x = (∫x. f x ∂M)"
using Invariants_cond_exp_is_integral[OF assms(1)] by auto
ultimately show ?thesis by auto
qed
(* If the Birkhoff sums tend a.e. to +∞, the average of f is positive.
   Otherwise, for a suitable x, infinitely many sums would lie below
   n * (∫ f) ≤ 0, contradicting divergence to +∞. *)
lemma (in ergodic_pmpt) birkhoff_positive_average:
fixes f::"'a ⇒ real"
assumes [measurable]: "integrable M f" and "AE x in M. (λn. birkhoff_sum f n x) ⇢ ∞"
shows "(∫ x. f x ∂M) > 0"
proof (rule ccontr)
assume "¬((∫ x. f x ∂M) > 0)"
then have *: "(∫ x. f x ∂M) ≤ 0" by simp
(* pick a point satisfying both a.e. properties simultaneously *)
have "AE x in M. (λn. birkhoff_sum f n x) ⇢ ∞ ∧ infinite {n. birkhoff_sum f n x ∈ {n * (∫x. f x ∂M) - 1 .. n* (∫x. f x ∂M)}}"
using assms(2) birkhoff_sum_small_asymp_neg[OF assms(1)] by auto
then obtain x where x: "(λn. birkhoff_sum f n x) ⇢ ∞" "infinite {n. birkhoff_sum f n x ∈ {n * (∫x. f x ∂M) - 1 .. n* (∫x. f x ∂M)}}"
using AE_False eventually_elim2 by blast
{
fix n assume "birkhoff_sum f n x ∈ {n * (∫x. f x ∂M) - 1 .. n * (∫x. f x ∂M)}"
then have "birkhoff_sum f n x ≤ n * (∫x. f x ∂M)" by simp
also have "... ≤ 0" using * by (simp add: mult_nonneg_nonpos)
finally have "birkhoff_sum f n x ≤ 0" by simp
}
then have "{n. birkhoff_sum f n x ∈ {n * (∫x. f x ∂M) - 1 .. n* (∫x. f x ∂M)}} ⊆ {n. birkhoff_sum f n x ≤ 0}" by auto
then have inf: "infinite {n. birkhoff_sum f n x ≤ 0}" using x(2) finite_subset by blast
(* but divergence to ∞ makes the sums eventually positive *)
have "0 < (∞::ereal)" by auto
then have "eventually (λn. birkhoff_sum f n x > (0::ereal)) sequentially" using x(1) order_tendsto_iff by metis
then obtain N where "⋀n. n ≥ N ⟹ birkhoff_sum f n x > (0::ereal)" by (meson eventually_at_top_linorder)
then have "⋀n. n ≥ N ⟹ birkhoff_sum f n x > 0" by auto
then have "{n. birkhoff_sum f n x ≤ 0} ⊆ {..<N}" by (metis (mono_tags, lifting) lessThan_iff linorder_not_less mem_Collect_eq subsetI)
then have "finite {n. birkhoff_sum f n x ≤ 0}" using finite_nat_iff_bounded by blast
then show "False" using inf by simp
qed
(* Mirror statement: a.e. divergence of the Birkhoff sums to -∞ forces a
   strictly negative average. *)
lemma (in ergodic_pmpt) birkhoff_negative_average:
fixes f::"'a ⇒ real"
assumes [measurable]: "integrable M f" and "AE x in M. (λn. birkhoff_sum f n x) ⇢ -∞"
shows "(∫ x. f x ∂M) < 0"
proof (rule ccontr)
assume "¬((∫ x. f x ∂M) < 0)"
then have *: "(∫ x. f x ∂M) ≥ 0" by simp
(* pick a point satisfying both a.e. properties simultaneously *)
have "AE x in M. (λn. birkhoff_sum f n x) ⇢ -∞ ∧ infinite {n. birkhoff_sum f n x ∈ {n * (∫x. f x ∂M) .. n* (∫x. f x ∂M) + 1}}"
using assms(2) birkhoff_sum_small_asymp_pos[OF assms(1)] by auto
then obtain x where x: "(λn. birkhoff_sum f n x) ⇢ -∞" "infinite {n. birkhoff_sum f n x ∈ {n * (∫x. f x ∂M) .. n* (∫x. f x ∂M) + 1}}"
using AE_False eventually_elim2 by blast
{
fix n assume "birkhoff_sum f n x ∈ {n * (∫x. f x ∂M) .. n * (∫x. f x ∂M) + 1}"
then have "birkhoff_sum f n x ≥ n * (∫x. f x ∂M)" by simp
moreover have "n * (∫x. f x ∂M) ≥ 0" using * by simp
ultimately have "birkhoff_sum f n x ≥ 0" by simp
}
then have "{n. birkhoff_sum f n x ∈ {n * (∫x. f x ∂M) .. n* (∫x. f x ∂M) + 1}} ⊆ {n. birkhoff_sum f n x ≥ 0}" by auto
then have inf: "infinite {n. birkhoff_sum f n x ≥ 0}" using x(2) finite_subset by blast
(* but divergence to -∞ makes the sums eventually negative *)
have "0 > (-∞::ereal)" by auto
then have "eventually (λn. birkhoff_sum f n x < (0::ereal)) sequentially" using x(1) order_tendsto_iff by metis
then obtain N where "⋀n. n ≥ N ⟹ birkhoff_sum f n x < (0::ereal)" by (meson eventually_at_top_linorder)
then have "⋀n. n ≥ N ⟹ birkhoff_sum f n x < 0" by auto
then have "{n. birkhoff_sum f n x ≥ 0} ⊆ {..<N}" by (metis (mono_tags, lifting) lessThan_iff linorder_not_less mem_Collect_eq subsetI)
then have "finite {n. birkhoff_sum f n x ≥ 0}" using finite_nat_iff_bounded by blast
then show "False" using inf by simp
qed
(* If |birkhoff_sum| tends a.e. to ∞ then the average cannot vanish: with
   average 0, infinitely many sums would stay in the window [0, 1]. *)
lemma (in ergodic_pmpt) birkhoff_nonzero_average:
fixes f::"'a ⇒ real"
assumes [measurable]: "integrable M f" and "AE x in M. (λn. abs(birkhoff_sum f n x)) ⇢ ∞"
shows "(∫ x. f x ∂M) ≠ 0"
proof (rule ccontr)
assume "¬((∫ x. f x ∂M) ≠ 0)"
then have *: "(∫ x. f x ∂M) = 0" by simp
have "AE x in M. (λn. abs(birkhoff_sum f n x)) ⇢ ∞ ∧ infinite {n. birkhoff_sum f n x ∈ {0 .. 1}}"
using assms(2) birkhoff_sum_small_asymp_pos[OF assms(1)] * by auto
then obtain x where x: "(λn. abs(birkhoff_sum f n x)) ⇢ ∞" "infinite {n. birkhoff_sum f n x ∈ {0 .. 1}}"
using AE_False eventually_elim2 by blast
(* eventually |birkhoff_sum| > 1, so only finitely many sums lie in [0, 1] *)
have "1 < (∞::ereal)" by auto
then have "eventually (λn. abs(birkhoff_sum f n x) > (1::ereal)) sequentially" using x(1) order_tendsto_iff by metis
then obtain N where "⋀n. n ≥ N ⟹ abs(birkhoff_sum f n x) > (1::ereal)" by (meson eventually_at_top_linorder)
then have *: "⋀n. n ≥ N ⟹ abs(birkhoff_sum f n x) > 1" by auto
have "{n. birkhoff_sum f n x ∈ {0..1}} ⊆ {..<N}" by (auto, metis (full_types) * abs_of_nonneg not_less)
then have "finite {n. birkhoff_sum f n x ∈ {0..1}}" using finite_nat_iff_bounded by blast
then show "False" using x(2) by simp
qed
end
Theory Shift_Operator
section ‹The shift operator on an infinite product measure›
theory Shift_Operator
imports Ergodicity ME_Library_Complement
begin
text ‹
Let ‹P› be an infinite product of i.i.d. instances of the distribution ‹M›.
Then the shift operator is the map
\[T(x_0, x_1, x_2, \ldots) = (x_1, x_2, \ldots)\ .\]
In this section, we define this operator and show that it is ergodic using Kolmogorov's
0--1 law.
›
(* Setup: P is the infinite product of countably many copies of M, and T is
   the one-sided shift f ↦ f ∘ Suc. *)
locale shift_operator_ergodic = prob_space +
fixes T :: "(nat ⇒ 'a) ⇒ (nat ⇒ 'a)" and P :: "(nat ⇒ 'a) measure"
defines "T ≡ (λf. f ∘ Suc)"
defines "P ≡ PiM (UNIV :: nat set) (λ_. M)"
begin
sublocale P: product_prob_space "λ_. M" UNIV
by unfold_locales
sublocale P: prob_space P
by (simp add: prob_space_PiM prob_space_axioms P_def)
(* The shift is measurable w.r.t. the product sigma-algebra. *)
lemma measurable_T [measurable]: "T ∈ P →⇩M P"
unfolding P_def T_def o_def
by (rule measurable_abs_UNIV[OF measurable_compose[OF measurable_component_singleton]]) auto
text ‹
The ‹n›-th tail algebra $\mathcal{T}_n$ is, in some sense, the algebra in which we forget all
information about all $x_i$ with ‹i < n›. We simply change the product algebra of ‹P› by replacing
the algebra for each ‹i < n› with the trivial algebra that contains only the empty set and the
entire space.
›
(* The n-th tail algebra replaces the product factors below n by the trivial
   algebra over space M, keeping only information about indices ≥ n. *)
definition tail_algebra :: "nat ⇒ (nat ⇒ 'a) measure"
where "tail_algebra n = PiM UNIV (λi. if i < n then trivial_measure (space M) else M)"
lemma tail_algebra_0 [simp]: "tail_algebra 0 = P"
by (simp add: tail_algebra_def P_def)
lemma space_tail_algebra [simp]: "space (tail_algebra n) = PiE UNIV (λ_. space M)"
by (simp add: tail_algebra_def space_PiM PiE_def Pi_def)
(* Coordinate projections are random variables with distribution M, and they
   form an independent family. *)
lemma measurable_P_component [measurable]: "P.random_variable M (λf. f i)"
unfolding P_def by measurable
lemma P_component [simp]: "distr P M (λf. f i) = M"
unfolding P_def by (subst P.PiM_component) auto
lemma indep_vars: "P.indep_vars (λ_. M) (λi f. f i) UNIV"
by (subst P.indep_vars_iff_distr_eq_PiM)
(simp_all add: restrict_def distr_id2 P.PiM_component P_def)
text ‹
The shift operator takes us from $\mathcal{T}_n$ to $\mathcal{T}_{n+1}$ (it forgets the
information about one more variable):
›
(* One application of T forgets one more coordinate; iterating, n applications
   map tail_algebra (m + n) into tail_algebra m. *)
lemma measurable_T_tail: "T ∈ tail_algebra (Suc n) →⇩M tail_algebra n"
unfolding T_def tail_algebra_def o_def
by (rule measurable_abs_UNIV[OF measurable_compose[OF measurable_component_singleton]]) simp_all
lemma measurable_funpow_T: "T ^^ n ∈ tail_algebra (m + n) →⇩M tail_algebra m"
proof (induction n)
case (Suc n)
have "(T ^^ n) ∘ T ∈ tail_algebra (m + Suc n) →⇩M tail_algebra m"
by (rule measurable_comp[OF _ Suc]) (simp_all add: measurable_T_tail)
thus ?case by (simp add: o_def funpow_swap1)
qed auto
lemma measurable_funpow_T': "T ^^ n ∈ tail_algebra n →⇩M P"
using measurable_funpow_T[of n 0] by simp
text ‹
The shift operator is clearly measure-preserving:
›
(* The shift preserves the product measure: distr P P T = P, by reindexing
   the infinite product along Suc. *)
lemma measure_preserving: "T ∈ measure_preserving P P"
proof
fix A :: "(nat ⇒ 'a) set" assume "A ∈ P.events"
hence "emeasure P (T -` A ∩ space P) = emeasure (distr P P T) A"
by (subst emeasure_distr) simp_all
also have "distr P P T = P" unfolding P_def T_def o_def
using distr_PiM_reindex[of UNIV "λ_. M" Suc UNIV] by (simp add: prob_space_axioms restrict_def)
finally show "emeasure P (T -` A ∩ space P) = emeasure P A" .
qed auto
sublocale fmpt P T
by unfold_locales
(use measure_preserving in ‹blast intro: measure_preserving_is_quasi_measure_preserving›)+
(* Restatement of coordinate independence in terms of the vimage algebras. *)
lemma indep_sets_vimage_algebra:
"P.indep_sets (λi. sets (vimage_algebra (space P) (λf. f i) M)) UNIV"
using indep_vars unfolding P.indep_vars_def sets_vimage_algebra by blast
text ‹
We can now show that the tail algebra $\mathcal{T}_n$ is a subalgebra of the algebra generated by the
algebras induced by all the variables ‹x⇩i› with ‹i ≥ n›:
›
(* The tail algebra at level n is contained in the sigma-algebra generated by
   the coordinate vimage algebras with indices ≥ n.  Each generating cylinder
   C = PiE UNIV C' is rewritten as a countable intersection of coordinate
   preimages for i ≥ n; coordinates below n contribute only {} or space M by
   definition of the trivial algebra. *)
lemma tail_algebra_subset:
"sets (tail_algebra n) ⊆
sigma_sets (space P) (⋃i∈{n..}. sets (vimage_algebra (space P) (λf. f i) M))"
proof -
have "sets (tail_algebra n) = sigma_sets (space P)
(prod_algebra UNIV (λi. if i < n then trivial_measure (space M) else M))"
by (simp add: tail_algebra_def sets_PiM PiE_def Pi_def P_def space_PiM)
also have "… ⊆ sigma_sets (space P) (⋃i∈{n..}. sets (vimage_algebra (space P) (λf. f i) M))"
proof (intro sigma_sets_mono subsetI)
fix C assume "C ∈ prod_algebra UNIV (λi. if i < n then trivial_measure (space M) else M)"
then obtain C'
where C': "C = Pi⇩E UNIV C'"
"C' ∈ (Π i∈UNIV. sets (if i < n then trivial_measure (space M) else M))"
by (elim prod_algebraE_all)
(* below n the factors are trivial, at or above n they are arbitrary events *)
have C'_1: "C' i ∈ {{}, space M}" if "i < n" for i
using C'(2) that by (auto simp: Pi_def sets_trivial_measure split: if_splits)
have C'_2: "C' i ∈ sets M" if "i ≥ n" for i
proof -
from that have "¬(i < n)"
by auto
with C'(2) show ?thesis
by (force simp: Pi_def sets_trivial_measure split: if_splits)
qed
have "C' i ∈ events" for i
using C'_1[of i] C'_2[of i] by (cases "i ≥ n") auto
hence "C ∈ sets P"
unfolding P_def C'(1) by (intro sets_PiM_I_countable) auto
hence "C ⊆ space P"
using sets.sets_into_space by blast
show "C ∈ sigma_sets (space P) (⋃i∈{n..}. sets (vimage_algebra (space P) (λf. f i) M))"
proof (cases "C = {}")
case False
(* a nonempty cylinder is the intersection over i ≥ n of coordinate preimages *)
have "C = (⋂i∈{n..}. (λf. f i) -` C' i) ∩ space P"
proof (intro equalityI subsetI, goal_cases)
case (1 f)
hence "f ∈ space P"
using 1 ‹C ⊆ space P› by blast
thus ?case
using C' 1 by (auto simp: Pi_def sets_trivial_measure split: if_splits)
next
case (2 f)
hence f: "f i ∈ C' i" if "i ≥ n" for i
using that by auto
have "f i ∈ C' i" for i
proof (cases "i ≥ n")
case True
thus ?thesis using C'_2[of i] f[of i] by auto
next
case False
thus ?thesis using C'_1[of i] C'(1) ‹C ≠ {}› 2
by (auto simp: P_def space_PiM)
qed
thus "f ∈ C"
using C' by auto
qed
also have "(⋂i∈{n..}. (λf. f i) -` C' i) ∩ space P =
(⋂i∈{n..}. (λf. f i) -` C' i ∩ space P)"
by blast
also have "… ∈ sigma_sets (space P) (⋃i∈{n..}. sets (vimage_algebra (space P) (λf. f i) M))"
(is "_ ∈ ?rhs")
proof (intro sigma_sets_INTER, goal_cases)
fix i show "(λf. f i) -` C' i ∩ space P ∈ ?rhs"
proof (cases "i ≥ n")
(* trivial factor: the preimage is {} or the whole space *)
case False
hence "C' i = {} ∨ C' i = space M"
using C'_1[of i] by auto
thus ?thesis
proof
assume [simp]: "C' i = space M"
have "space P ⊆ (λf. f i) -` C' i"
by (auto simp: P_def space_PiM)
hence "(λf. f i) -` C' i ∩ space P = space P"
by blast
thus ?thesis using sigma_sets_top
by metis
qed (auto intro: sigma_sets.Empty)
next
case i: True
have "(λf. f i) -` C' i ∩ space P ∈ sets (vimage_algebra (space P) (λf. f i) M)"
using C'_2[OF i] by (blast intro: in_vimage_algebra)
thus ?thesis using i by blast
qed
next
have "C ⊆ space P" if "C ∈ sets (vimage_algebra (space P) (λf. f i) M)" for i C
using sets.sets_into_space[OF that] by simp
thus "(⋃i∈{n..}. sets (vimage_algebra (space P) (λf. f i) M)) ⊆ Pow (space P)"
by auto
qed auto
finally show ?thesis .
qed (auto simp: sigma_sets.Empty)
qed
finally show ?thesis .
qed
text ‹
It now follows that the ‹T›-invariant events are a subset of the tail algebra induced
by the variables:
›
(* Every T-invariant event lies in each sigma-algebra generated by the
   coordinates ≥ n, hence in the tail events of the coordinate family:
   A equals the (T^^n)-preimage of A, which is tail_algebra-n measurable. *)
lemma Invariants_subset_tail_algebra:
"sets Invariants ⊆ P.tail_events (λi. sets (vimage_algebra (space P) (λf. f i) M))"
proof
fix A assume A: "A ∈ sets Invariants"
have A': "A ∈ P.events"
using A unfolding Invariants_sets by simp_all
show "A ∈ P.tail_events (λi. sets (vimage_algebra (space P) (λf. f i) M))"
unfolding P.tail_events_def
proof safe
fix n :: nat
(* invariance propagates to every iterate of T *)
have "vimage_restr T A = A"
using A by (simp add: Invariants_vrestr)
hence "A = vimage_restr (T ^^ n) A"
using A' by (induction n) (simp_all add: vrestr_comp)
also have "vimage_restr (T ^^ n) A = (T ^^ n) -` (A ∩ space P) ∩ space P"
unfolding vimage_restr_def ..
also have "A ∩ space P = A"
using A' by simp
also have "space P = space (tail_algebra n)"
by (simp add: P_def space_PiM)
also have "(T ^^ n) -` A ∩ space (tail_algebra n) ∈ sets (tail_algebra n)"
by (rule measurable_sets[OF measurable_funpow_T' A'])
also have "sets (tail_algebra n) ⊆
sigma_sets (space P) (⋃i∈{n..}. sets (vimage_algebra (space P) (λf. f i) M))"
by (rule tail_algebra_subset)
finally show "A ∈ sigma_sets (space P)
(⋃i∈{n..}. sets (vimage_algebra (space P) (λf. f i) M))" .
qed
qed
text ‹
A simple invocation of Kolmogorov's 0--1 law now proves that ‹T› is indeed ergodic:
›
(* Kolmogorov's 0-1 law: invariant events are tail events of an independent
   family of algebras, hence have probability 0 or 1 — i.e. T is ergodic. *)
sublocale ergodic_fmpt P T
proof
fix A assume A: "A ∈ sets Invariants"
have A': "A ∈ P.events"
using A unfolding Invariants_sets by simp_all
have "sigma_algebra (space P) (sets (vimage_algebra (space P) (λf. f i) M))" for i
by (metis sets.sigma_algebra_axioms space_vimage_algebra)
hence "P.prob A = 0 ∨ P.prob A = 1"
using indep_sets_vimage_algebra
by (rule P.kolmogorov_0_1_law) (use A Invariants_subset_tail_algebra in blast)
thus "A ∈ null_sets P ∨ space P - A ∈ null_sets P"
by (rule disj_forward) (use A'(1) P.prob_compl[of A] in ‹auto simp: P.emeasure_eq_measure›)
qed
end
endTheory Kingman
section ‹Subcocycles, subadditive ergodic theory›
theory Kingman
imports Ergodicity Fekete
begin
text ‹Subadditive ergodic theory is the branch of ergodic theory devoted
to the study of subadditive cocycles (named subcocycles in what follows), i.e.,
functions such that $u(n+m, x) \leq u(n, x) + u(m, T^n x)$ for all $x$ and $m,n$.
For instance, Birkhoff sums are examples of such subadditive cocycles (in fact, they are
additive), but more interesting examples are genuinely subadditive. The main result
of the theory is Kingman's theorem, asserting the almost sure convergence of
$u_n / n$ (this is a generalization of Birkhoff theorem). If the asymptotic average
$\lim \int u_n / n$ (which exists by subadditivity and Fekete lemma) is not $-\infty$,
then the convergence takes also place in $L^1$. We prove all this below.
›
context mpt
begin
subsection ‹Definition and basic properties›
(* A subcocycle is a sequence of integrable functions satisfying the
   subadditivity relation u (n+m) x ≤ u n x + u m ((T^^n) x). *)
definition subcocycle::"(nat ⇒ 'a ⇒ real) ⇒ bool"
where "subcocycle u = ((∀n. integrable M (u n)) ∧ (∀n m x. u (n+m) x ≤ u n x + u m ((T^^n) x)))"
lemma subcocycle_ineq:
assumes "subcocycle u"
shows "u (n+m) x ≤ u n x + u m ((T^^n) x)"
using assms unfolding subcocycle_def by blast
(* Taking n = m = 0 in the subadditivity relation shows u 0 ≥ 0. *)
lemma subcocycle_0_nonneg:
assumes "subcocycle u"
shows "u 0 x ≥ 0"
proof -
have "u (0+0) x ≤ u 0 x + u 0 ((T^^0) x)"
using assms unfolding subcocycle_def by blast
then show ?thesis by auto
qed
(* Integrability (hence measurability) is part of the definition. *)
lemma subcocycle_integrable:
assumes "subcocycle u"
shows "integrable M (u n)"
"u n ∈ borel_measurable M"
using assms unfolding subcocycle_def by auto
(* Birkhoff sums of an integrable function form an (additive) subcocycle. *)
lemma subcocycle_birkhoff:
assumes "integrable M f"
shows "subcocycle (birkhoff_sum f)"
unfolding subcocycle_def by (auto simp add: assms birkhoff_sum_integral(1) birkhoff_sum_cocycle)
text ‹The set of subcocycles is stable under addition, multiplication by positive numbers,
and $\max$.›
(* Stability of subcocycles under pointwise sum, nonnegative scalar multiple
   and pointwise max. *)
lemma subcocycle_add:
assumes "subcocycle u" "subcocycle v"
shows "subcocycle (λn x. u n x + v n x)"
unfolding subcocycle_def
proof (auto)
fix n
show "integrable M (λx. u n x + v n x)" using assms unfolding subcocycle_def by simp
next
fix n m x
have "u (n+m) x ≤ u n x + u m ((T ^^ n) x)" using assms(1) subcocycle_def by simp
moreover have "v (n+m) x ≤ v n x + v m ((T ^^ n) x)" using assms(2) subcocycle_def by simp
ultimately show "u (n + m) x + v (n + m) x ≤ u n x + v n x + (u m ((T ^^ n) x) + v m ((T ^^ n) x))"
by simp
qed
lemma subcocycle_cmult:
assumes "subcocycle u" "c ≥ 0"
shows "subcocycle (λn x. c * u n x)"
using assms unfolding subcocycle_def by (auto, metis distrib_left mult_left_mono)
lemma subcocycle_max:
assumes "subcocycle u" "subcocycle v"
shows "subcocycle (λn x. max (u n x) (v n x))"
unfolding subcocycle_def proof (auto)
fix n
show "integrable M (λx. max (u n x) (v n x))" using assms unfolding subcocycle_def by auto
next
(* subadditivity of the max follows from subadditivity of each argument *)
fix n m x
have "u (n+m) x ≤ u n x + u m ((T^^n) x)" using assms(1) unfolding subcocycle_def by auto
then show "u (n + m) x ≤ max (u n x) (v n x) + max (u m ((T ^^ n) x)) (v m ((T ^^ n) x))"
by simp
next
fix n m x
have "v (n+m) x ≤ v n x + v m ((T^^n) x)" using assms(2) unfolding subcocycle_def by auto
then show "v (n + m) x ≤ max (u n x) (v n x) + max (u m ((T ^^ n) x)) (v m ((T ^^ n) x))"
by simp
qed
text ‹Applying inductively the subcocycle equation, it follows that a subcocycle is bounded
by the Birkhoff sum of the subcocycle at time $1$.›
(* Iterating subadditivity with m = 1 bounds u n by the Birkhoff sum of u 1. *)
lemma subcocycle_bounded_by_birkhoff1:
assumes "subcocycle u" "n > 0"
shows "u n x ≤ birkhoff_sum (u 1) n x"
using ‹n > 0› proof (induction rule: ind_from_1)
case 1
show ?case by auto
next
case (Suc p)
have "u (Suc p) x ≤ u p x + u 1 ((T^^p)x)" using assms(1) subcocycle_def by (metis Suc_eq_plus1)
then show ?case using Suc birkhoff_sum_cocycle[where ?n = p and ?m = 1] ‹ p>0 › by (simp add: birkhoff_sum_def)
qed
text ‹It is often important to bound a cocycle $u_n(x)$ by the Birkhoff sums of $u_N/N$. Compared
to the trivial upper bound for $u_1$, there are additional boundary errors that make the
estimate more cumbersome (but these terms only come from an $N$-neighborhood of $0$ and $n$, so
they are negligible if $N$ is fixed and $n$ tends to infinity).›
(* Bound u n by the Birkhoff sum of u N / N over n - 2N steps, up to two boundary
   error terms: E1 collects |u 1| along the first N iterates of x, and E2 collects
   |u 1| along the last 2N iterates before time n. *)
lemma subcocycle_bounded_by_birkhoffN:
assumes "subcocycle u" "n > 2*N" "N>0"
shows "u n x ≤ birkhoff_sum (λx. u N x / real N) (n - 2 * N) x
+ (∑i<N. ¦u 1 ((T ^^ i) x)¦)
+ 2 * (∑i<2*N. ¦u 1 ((T ^^ (n - (2 * N - i))) x)¦)"
proof -
(* Iterated subadditivity along the arithmetic progression r, N+r, 2N+r, ... *)
have Iar: "u (a*N+r) x ≤ u r x + (∑i<a. u N ((T^^(i * N + r))x))" for r a
proof (induction a)
case 0
then show ?case by auto
next
case (Suc a)
have "u ((a+1)*N+r) x = u((a*N+r) + N) x"
by (simp add: semiring_normalization_rules(2) semiring_normalization_rules(23))
also have "... ≤ u(a*N+r) x + u N ((T^^(a*N+r))x)"
using assms(1) unfolding subcocycle_def by auto
also have "... ≤ u r x + (∑i<a. u N ((T^^(i * N + r))x)) + u N ((T^^(a*N+r))x)"
using Suc.IH by auto
also have "... = u r x + (∑i<a+1. u N ((T^^(i * N + r))x))"
by auto
finally show ?case by auto
qed
(* Variant of Iar at r = 0, without the initial term u r x (requires a > 0). *)
have Ia: "u (a*N) x ≤ (∑i<a. u N ((T^^(i * N))x))" if "a>0" for a
using that proof (induction rule: ind_from_1)
case 1
show ?case by auto
next
case (Suc a)
have "u ((a+1)*N) x = u((a*N) + N) x"
by (simp add: semiring_normalization_rules(2) semiring_normalization_rules(23))
also have "... ≤ u(a*N) x + u N ((T^^(a*N))x)"
using assms(1) unfolding subcocycle_def by auto
also have "... ≤ (∑i<a. u N ((T^^(i * N))x)) + u N ((T^^(a*N))x)"
using Suc by auto
also have "... = (∑i<a+1. u N ((T^^(i * N))x))"
by auto
finally show ?case by auto
qed
(* The two boundary error terms appearing in the statement. *)
define E1 where "E1 = (∑i<N. abs(u 1 ((T^^i)x)))"
define E2 where "E2 = (∑i<2*N. abs(u 1 ((T^^(n-(2*N-i))) x)))"
have "E2 ≥ 0" unfolding E2_def by auto
(* Decompose n = a*N + s with N ≤ s < 2N, so that a*N lies in (n-2N, n-N]. *)
obtain a0 s0 where 0: "s0 < N" "n = a0 * N + s0"
using ‹0 < N› mod_div_decomp mod_less_divisor by blast
then have "a0 ≥ 1" using ‹n > 2 * N› ‹N>0›
by (metis Nat.add_0_right add.commute add_lessD1 add_mult_distrib comm_monoid_mult_class.mult_1 eq_imp_le
less_imp_add_positive less_imp_le_nat less_one linorder_neqE_nat mult.left_neutral mult_not_zero not_add_less1 one_add_one)
define a s where "a = a0-1" and "s = s0+N"
then have as: "n = a * N + s" unfolding a_def s_def using ‹a0 ≥ 1› 0 by (simp add: mult_eq_if)
have s: "s ≥ N" "s < 2*N" using 0 unfolding s_def by auto
have a: "a*N > n - 2*N" "a*N ≤ n - N" using as s ‹n > 2*N› by auto
then have "(a*N - (n-2*N)) ≤ N" using ‹n > 2*N› by auto
have "a*N ≥ n - 2*N" using a by simp
(* For each residue r < N, bound u n by the sum along the progression of step N
   starting at r, plus the error terms E1 and E2. *)
{
fix r::nat assume "r < N"
have "a*N+r > n - 2*N" using ‹n>2*N› as s by auto
define tr where "tr = n-(a*N+r)"
have "tr > 0" unfolding tr_def using as s ‹r<N› by auto
then have *: "n = (a*N+r) + tr" unfolding tr_def by auto
(* the tail after time a*N+r sits inside [n-2N, n), hence is controlled by E2 *)
have "birkhoff_sum (u 1) tr ((T^^(a*N+r))x) = (∑i<tr. u 1 ((T^^(a*N+r+i))x))"
unfolding birkhoff_sum_def by (simp add: add.commute funpow_add)
also have "... = (∑i∈{a*N+r..<a*N+r+tr}. u 1 ((T^^i) x))"
by (rule sum.reindex_bij_betw, rule bij_betw_byWitness[where ?f' = "λi. i - (a * N + r)"], auto)
also have "... ≤ (∑i∈{a*N+r..<a*N+r+tr}. abs(u 1 ((T^^i) x)))"
by (simp add: sum_mono)
also have "... ≤ (∑i∈{n-2*N..<n}. abs(u 1 ((T^^i) x)))"
apply (rule sum_mono2) using as s ‹r<N› tr_def by auto
also have "... = E2" unfolding E2_def
apply (rule sum.reindex_bij_betw[symmetric], rule bij_betw_byWitness[where ?f' = "λi. i - (n-2*N)"])
using ‹n > 2*N› by auto
finally have A: "birkhoff_sum (u 1) tr ((T^^(a*N+r))x) ≤ E2" by simp
have "u n x ≤ u (a*N+r) x + u tr ((T^^(a*N+r))x)"
using assms(1) * unfolding subcocycle_def by auto
also have "... ≤ u (a*N+r) x + birkhoff_sum (u 1) tr ((T^^(a*N+r))x)"
using subcocycle_bounded_by_birkhoff1[OF assms(1)] ‹tr > 0› by auto
finally have B: "u n x ≤ u (a*N+r) x + E2"
using A by auto
(* the head u (a*N+r) is controlled by E1 plus the progression sum *)
have "u (a*N+r) x ≤ (∑i<N. abs(u 1 ((T^^i)x))) + (∑i<a. u N ((T^^(i*N+r))x))"
proof (cases "r = 0")
case True
then have "a>0" using ‹a*N+r > n - 2*N› not_less by fastforce
have "u(a*N+r) x ≤ (∑i<a. u N ((T^^(i*N+r))x))" using Ia[OF ‹a>0›] True by auto
moreover have "0 ≤ (∑i<N. abs(u 1 ((T^^i)x)))" by auto
ultimately show ?thesis by linarith
next
case False
then have I: "u (a*N+r) x ≤ u r x + (∑i<a. u N ((T^^(i * N + r))x))" using Iar by auto
have "u r x ≤ (∑i<r. u 1 ((T^^i)x))"
using subcocycle_bounded_by_birkhoff1[OF assms(1)] False unfolding birkhoff_sum_def by auto
also have "... ≤ (∑i<r. abs(u 1 ((T^^i)x)))"
by (simp add: sum_mono)
also have "... ≤ (∑i<N. abs(u 1 ((T^^i)x)))"
apply (rule sum_mono2) using ‹r<N› by auto
finally show ?thesis using I by auto
qed
then have "u n x ≤ E1 + (∑i<a. u N ((T^^(i*N+r))x)) + E2"
unfolding E1_def using B by auto
} note * = this
(* each term u N (T^j x) with j ∈ [n-2N, a*N) is itself bounded by E2 *)
have I: "u N ((T^^j) x) ≤ E2" if "j∈{n-2*N..<a*N}" for j
proof -
have "u N ((T^^j) x) ≤ (∑i<N. u 1 ((T^^i) ((T^^j)x)))"
using subcocycle_bounded_by_birkhoff1[OF assms(1) ‹N>0›] unfolding birkhoff_sum_def by auto
also have "... = (∑i<N. u 1 ((T^^(i+j))x))" by (simp add: funpow_add)
also have "... ≤ (∑i<N. abs(u 1 ((T^^(i+j))x)))" by (rule sum_mono, auto)
also have "... = (∑k∈{j..<N+j}. abs(u 1 ((T^^k)x)))"
by (rule sum.reindex_bij_betw, rule bij_betw_byWitness[where ?f' = "λk. k-j"], auto)
also have "... ≤ (∑i∈{n-2*N..<n}. abs(u 1 ((T^^i) x)))"
apply (rule sum_mono2) using ‹j∈{n-2*N..<a*N}› ‹a*N ≤ n - N› by auto
also have "... = E2" unfolding E2_def
apply (rule sum.reindex_bij_betw[symmetric], rule bij_betw_byWitness[where ?f' = "λi. i - (n-2*N)"])
using ‹n > 2*N› by auto
finally show ?thesis by auto
qed
(* truncate the sum over j < a*N at j < n-2N, at a cost of at most N * E2 *)
have "(∑j<a*N. u N ((T^^j) x)) - (∑j<n-2*N. u N ((T^^j) x)) = (∑j∈{n-2*N..<a*N}. u N ((T^^j) x))"
using sum.atLeastLessThan_concat[OF _ ‹a*N ≥ n - 2*N›, of 0 "λj. u N ((T^^j) x)", symmetric] atLeast0LessThan by simp
also have "... ≤ (∑j∈{n-2*N..<a*N}. E2)" by (rule sum_mono[OF I])
also have "... = (a*N - (n-2*N)) * E2" by simp
also have "... ≤ N * E2" using ‹(a*N - (n-2*N)) ≤ N› ‹E2 ≥ 0› by (simp add: mult_right_mono)
finally have J: "(∑j<a*N. u N ((T^^j) x)) ≤ (∑j<n-2*N. u N ((T^^j) x)) + N * E2" by auto
(* average the bound * over the N residues r, then divide by N to conclude *)
have "N * u n x = (∑r<N. u n x)" by auto
also have "... ≤ (∑r<N. E1 + E2 + (∑i<a. u N ((T^^(i*N+r))x)))"
apply (rule sum_mono) using * by fastforce
also have "... = (∑r<N. E1 + E2) + (∑r<N. (∑i<a. u N ((T^^(i*N+r))x)))"
by (rule sum.distrib)
also have "... = N* (E1 + E2) + (∑j<a*N. u N ((T^^j) x))"
using sum_arith_progression by auto
also have "... ≤ N *(E1+E2) + (∑j<n-2*N. u N ((T^^j) x)) + N*E2"
using J by auto
also have "... = N * (E1+E2) + N * (∑j<n-2*N. u N ((T^^j) x) / N) + N * E2"
using ‹N>0› by (simp add: sum_distrib_left)
also have "... = N*(E1 + E2 + (∑j<n-2*N. u N ((T^^j) x) / N) + E2)"
by (simp add: distrib_left)
finally have "u n x ≤ E1 + 2*E2 + birkhoff_sum (λx. u N x / N) (n-2*N) x"
unfolding birkhoff_sum_def using ‹N>0› by auto
then show ?thesis unfolding E1_def E2_def by auto
qed
text ‹Many natural cocycles are only defined almost everywhere, and then the
subadditivity property only makes sense almost everywhere. We will now show
that such an a.e.-subcocycle coincides almost everywhere with a genuine subcocycle
in the above sense. Then, all the results for subcocycles will apply to such
a.e.-subcocycles. (Usually, in ergodic theory, subcocycles only satisfy the subadditivity
property almost everywhere, but we have requested it everywhere for simplicity in the proofs.)
The subcocycle will be defined in a recursive way. This means that it cannot be defined in a
proof (since complicated function definitions are not available inside proofs). Since it is
defined in terms of $u$, the function $u$ has to be available at the top level, which is most
conveniently done using a context.
›
(* Anonymous context fixing an a.e.-subcocycle u: subadditivity holds only almost
   everywhere (assumption H(1)), while each u n is genuinely integrable (H(2)). *)
context
fixes u::"nat ⇒ 'a ⇒ real"
assumes H: "⋀m n. AE x in M. u (n+m) x ≤ u n x + u m ((T^^n) x)"
"⋀n. integrable M (u n)"
begin
(* Recursive modification v of u that is subadditive everywhere: v 0 = max(u 0, 0),
   v 1 = u 1, and for n > 1 v n is u n truncated by the minimum over all proper
   splittings v k + v (n-k) ∘ T^k with 0 < k < n. *)
private fun v :: "nat ⇒ 'a ⇒ real" where "v n x = (
if n = 0 then max (u 0 x) 0
else if n = 1 then u 1 x
else min (u n x) (Min ((λk. v k x + v (n-k) ((T^^k) x))`{0<..<n})))"
(* Each v n is integrable, by strong induction on n: the base cases reduce to
   integrability of u 0 and u 1, and the recursive case combines integrability of
   a min, a finite Min, sums, and composition with T^k (measure preservation). *)
private lemma integrable_v:
"integrable M (v n)" for n
proof (induction n rule: nat_less_induct)
case (1 n)
consider "n = 0" | "n = 1" | "n>1" by linarith
then show ?case
proof (cases)
assume "n = 0"
(* v 0 = max (u 0) 0, integrable as the max of two integrable functions *)
have "v 0 x = max (u 0 x) 0" for x by simp
then show ?thesis using integrable_max[OF H(2)[of 0]] ‹n = 0› by auto
next
assume "n = 1"
(* v 1 coincides with u 1, which is integrable by assumption H(2) *)
have "v 1 x = u 1 x" for x by simp
then show ?thesis using H(2)[of 1] ‹n = 1› by auto
next
assume "n > 1"
(* unfold the recursive definition of v at n > 1 *)
hence "v n x = min (u n x) (MIN k ∈ {0<..<n}. v k x + v (n-k) ((T^^k) x))" for x
by auto
(* integrability of each piece: u n by H(2), each v k and v (n-k) ∘ T^k by the
   induction hypothesis together with Tn_integral_preserving *)
moreover have "integrable M (λx. min (u n x) (MIN k ∈ {0<..<n}. v k x + v (n-k) ((T^^k) x)))"
apply (rule integrable_min)
apply (simp add: H(2))
apply (rule integrable_MIN, simp)
using ‹n >1› apply auto[1]
apply (rule Bochner_Integration.integrable_add)
using "1.IH" apply auto[1]
apply (rule Tn_integral_preserving(1))
using "1.IH" by (metis ‹1 < n› diff_less greaterThanLessThan_iff max_0_1(2) max_less_iff_conj)
ultimately show ?case by auto
qed
qed
(* Almost everywhere, the modified function v n agrees with u n. Strong induction
   on n: on the a.e. set where all previous v k agree with u k along every orbit
   and the a.e. subadditivity of u holds, the truncating Min in the definition of
   v n is at least u n, so the min is attained at u n. *)
private lemma u_eq_v_AE:
"AE x in M. v n x = u n x" for n
proof (induction n rule: nat_less_induct)
case (1 n)
consider "n = 0" | "n = 1" | "n>1" by linarith
then show ?case
proof (cases)
assume "n = 0"
(* H(1) at m = n = 0 gives u 0 ≤ 2 * u 0 a.e., i.e. u 0 ≥ 0 a.e., so the
   truncation max (u 0) 0 changes nothing a.e. *)
have "AE x in M. u 0 x ≤ u 0 x + u 0 x" using H(1)[of 0 0] by auto
then have "AE x in M. u 0 x ≥ 0" by auto
moreover have "v 0 x = max (u 0 x) 0" for x by simp
ultimately show ?thesis using ‹n = 0› by auto
next
assume "n = 1"
(* v 1 is u 1 by definition, everywhere *)
have "v 1 x = u 1 x" for x by simp
then show ?thesis using ‹n = 1› by simp
next
assume "n > 1"
(* propagate the a.e. equality v k = u k to all iterates T^s x *)
{
fix k assume "k<n"
then have "AE x in M. v k x = u k x" using "1.IH" by simp
with T_AE_iterates[OF this] have "AE x in M. ∀s. v k ((T^^s) x) = u k ((T^^s) x)" by simp
} note * = this
have "AE x in M. ∀k ∈ {..<n}. ∀s. v k ((T^^s) x) = u k ((T^^s) x)"
apply (rule AE_finite_allI) using * by simp_all
(* the a.e. subadditivity of u, quantified over all pairs (i, j) at once *)
moreover have "AE x in M. ∀i j. u (i+j) x ≤ u i x + u j ((T^^i) x)"
apply (subst AE_all_countable, intro allI)+ using H(1) by simp
moreover
{
(* pointwise argument on the good set: the Min in the definition of v n
   dominates u n, so the outer min picks u n *)
fix x assume "∀k ∈ {..<n}. ∀s. v k ((T^^s) x) = u k ((T^^s) x)"
"∀i j. u (i+j) x ≤ u i x + u j ((T^^i) x)"
then have Hx: "⋀k s. k < n ⟹ v k ((T^^s) x) = u k ((T^^s) x)"
"⋀i j. u (i+j) x ≤ u i x + u j ((T^^i) x)"
by auto
{
fix k assume "k ∈ {0<..<n}"
then have K: "k<n" "n-k<n" by auto
have "u n x ≤ u k x + u (n-k) ((T^^k) x)" using Hx(2) K by (metis le_add_diff_inverse less_imp_le_nat)
also have "... = v k x + v (n-k) ((T^^k)x)" using Hx(1)[OF ‹k <n›, of 0] Hx(1)[OF ‹n-k <n›, of k] by auto
finally have "u n x ≤ v k x + v (n-k) ((T^^k)x)" by simp
}
then have *: "⋀z. z ∈ (λk. v k x + v (n-k) ((T^^k) x))`{0<..<n} ⟹ u n x ≤ z" by blast
have "u n x ≤ Min ((λk. v k x + v (n-k) ((T^^k) x))`{0<..<n})"
apply (rule Min.boundedI) using ‹n>1› * by auto
moreover have "v n x = min (u n x) (Min ((λk. v k x + v (n-k) ((T^^k) x))`{0<..<n}))"
using ‹1<n› by auto
ultimately have "v n x = u n x" by auto
}
ultimately show ?thesis by auto
qed
qed
(* The modified function v is subadditive everywhere (not just a.e.): when both
   n and m are positive, n is a splitting index occurring in the Min defining
   v (n+m); the degenerate cases use that v 0 ≥ 0 by construction. *)
private lemma subcocycle_v:
"v (n+m) x ≤ v n x + v m ((T^^n) x)"
proof -
consider "n = 0" | "m = 0" | "n>0 ∧ m >0" by auto
then show ?thesis
proof (cases)
case 1
(* n = 0: v 0 x ≥ 0 since v 0 = max (u 0) 0 *)
then have "v n x ≥ 0" by simp
then show ?thesis using ‹n = 0› by auto
next
case 2
(* m = 0: same argument at the other endpoint *)
then have "v m x ≥ 0" by simp
then show ?thesis using ‹m = 0› by auto
next
case 3
then have "n+m > 1" by simp
(* unfold v at n+m > 1 and bound the Min by the splitting at index n *)
then have "v (n+m) x = min (u(n+m) x) (Min ((λk. v k x + v ((n+m)-k) ((T^^k) x))`{0<..<n+m}))" by simp
also have "... ≤ Min ((λk. v k x + v ((n+m)-k) ((T^^k) x))`{0<..<n+m})" by simp
also have "... ≤ v n x + v ((n+m)-n) ((T^^n) x)"
apply (rule Min_le, simp)
by (metis (lifting) ‹0 < n ∧ 0 < m› add.commute greaterThanLessThan_iff image_iff less_add_same_cancel2)
finally show ?thesis by simp
qed
qed
(* Main statement of the context: any a.e.-subcocycle u coincides a.e. with a
   genuine subcocycle, namely the recursive modification v constructed above. *)
lemma subcocycle_AE_in_context:
"∃w. subcocycle w ∧ (AE x in M. ∀n. w n x = u n x)"
proof -
have "subcocycle v" using subcocycle_v integrable_v unfolding subcocycle_def by auto
(* upgrade the a.e. equality at each fixed n to an a.e. equality for all n *)
moreover have "AE x in M. ∀n. v n x = u n x"
by (subst AE_all_countable, intro allI, rule u_eq_v_AE)
ultimately show ?thesis by blast
qed
end
(* Restatement of subcocycle_AE_in_context outside the anonymous context, with
   the a.e. subadditivity and integrability of u as explicit assumptions. *)
lemma subcocycle_AE:
fixes u::"nat ⇒ 'a ⇒ real"
assumes "⋀m n. AE x in M. u (n+m) x ≤ u n x + u m ((T^^n) x)"
"⋀n. integrable M (u n)"
shows "∃w. subcocycle w ∧ (AE x in M. ∀n. w n x = u n x)"
using subcocycle_AE_in_context assms by blast
subsection ‹The asymptotic average›
text ‹In this subsection, we define the asymptotic average of a subcocycle $u$, i.e., the
limit of $\int u_n(x)/n$ (the convergence follows from subadditivity of $\int u_n$) and study its
basic properties, especially in terms of operations on subcocycles. In general, it can be
$-\infty$, so we define it in the extended reals.›
(* Asymptotic average of a subcocycle: the infimum over n > 0 of the spatial mean
   of u n divided by n. Valued in the extended reals since it can be -∞. *)
definition subcocycle_avg_ereal::"(nat ⇒ 'a ⇒ real) ⇒ ereal" where
"subcocycle_avg_ereal u = Inf {ereal((∫x. u n x ∂M) / n) |n. n > 0}"
(* The asymptotic average is never +∞: it is an Inf of a nonempty set of finite
   (real-valued) elements. It may, however, be -∞. *)
lemma subcocycle_avg_finite:
"subcocycle_avg_ereal u < ∞"
unfolding subcocycle_avg_ereal_def using Inf_less_iff less_ereal.simps(4) by blast
(* The sequence of integrals n ↦ ∫ u n is subadditive: integrate the pointwise
   subcocycle inequality, using that T^n preserves the integral. *)
lemma subcocycle_avg_subadditive:
assumes "subcocycle u"
shows "subadditive (λn. (∫x. u n x ∂M))"
unfolding subadditive_def proof (intro allI)
have int_u [measurable]: "⋀n. integrable M (u n)" using assms unfolding subcocycle_def by auto
fix m n
(* integrate the subadditivity inequality pointwise *)
have "(∫x. u (n+m) x ∂M) ≤ (∫x. u n x + u m ((T^^n) x) ∂M)"
apply (rule integral_mono)
using int_u apply (auto simp add: Tn_integral_preserving(1))
using assms unfolding subcocycle_def by auto
also have "... ≤ (∫x. u n x ∂M) + (∫x. u m ((T^^n) x) ∂M)"
using int_u by (auto simp add: Tn_integral_preserving(1))
(* T^n preserves integrals, so the second term is just ∫ u m *)
also have "... = (∫x. u n x ∂M) + (∫x. u m x ∂M)"
using int_u by (auto simp add: Tn_integral_preserving(2))
finally show "(∫x. u (n+m) x ∂M) ≤ (∫x. u n x ∂M) + (∫x. u m x ∂M)" by simp
qed
(* Fekete-type convergence: the normalized integrals ∫ u n / n converge (in ereal)
   to their infimum, i.e. to the asymptotic average. *)
lemma subcocycle_int_tendsto_avg_ereal:
assumes "subcocycle u"
shows "(λn. (∫x. u n x / n ∂M)) ⇢ subcocycle_avg_ereal u"
unfolding subcocycle_avg_ereal_def
using subadditive_converges_ereal[OF subcocycle_avg_subadditive[OF assms]] by auto
text ‹The average behaves well under addition, scalar multiplication and max, trivially.›
(* Additivity of the asymptotic average: identify the limit of the sum of the two
   normalized-integral sequences (finiteness from subcocycle_avg_finite rules out
   the indeterminate ∞ + (-∞) case) with the average of the sum subcocycle. *)
lemma subcocycle_avg_ereal_add:
assumes "subcocycle u" "subcocycle v"
shows "subcocycle_avg_ereal (λn x. u n x + v n x) = subcocycle_avg_ereal u + subcocycle_avg_ereal v"
proof -
have int [simp]: "⋀n. integrable M (u n)" "⋀n. integrable M (v n)" using assms unfolding subcocycle_def by auto
{
fix n
(* the integral of the sum splits, and the division by n distributes *)
have "(∫x. u n x / n ∂M) + (∫x. v n x / n ∂M) = (∫x. u n x / n + v n x / n ∂M)"
by (rule Bochner_Integration.integral_add[symmetric], auto)
also have "... = (∫x. (u n x + v n x) / n ∂M)"
by (rule Bochner_Integration.integral_cong, auto simp add: add_divide_distrib)
finally have "ereal (∫x. u n x / n ∂M) + (∫x. v n x / n ∂M) = (∫x. (u n x + v n x) / n ∂M)"
by auto
}
moreover have "(λn. ereal (∫x. u n x / n ∂M) + (∫x. v n x / n ∂M))
⇢ subcocycle_avg_ereal u + subcocycle_avg_ereal v"
apply (intro tendsto_intros subcocycle_int_tendsto_avg_ereal[OF assms(1)] subcocycle_int_tendsto_avg_ereal[OF assms(2)])
using subcocycle_avg_finite by auto
ultimately have "(λn. (∫x. (u n x + v n x) / n ∂M)) ⇢ subcocycle_avg_ereal u + subcocycle_avg_ereal v"
by auto
(* the same sequence also converges to the average of the sum; limits are unique *)
moreover have "(λn. (∫x. (u n x + v n x) / n ∂M)) ⇢ subcocycle_avg_ereal (λn x. u n x + v n x)"
by (rule subcocycle_int_tendsto_avg_ereal[OF subcocycle_add[OF assms]])
ultimately show ?thesis using LIMSEQ_unique by blast
qed
(* Homogeneity of the asymptotic average under nonnegative scalars. The case c = 0
   is handled separately, since ereal multiplication by 0 requires care when the
   average could be -∞. *)
lemma subcocycle_avg_ereal_cmult:
assumes "subcocycle u" "c ≥ (0::real)"
shows "subcocycle_avg_ereal (λn x. c * u n x) = c * subcocycle_avg_ereal u"
proof (cases "c = 0")
case True
(* with c = 0 every normalized integral vanishes, so the average is 0 *)
have *: "ereal (∫x. (c * u n x) / n ∂M) = 0" if "n>0" for n
by (subst True, auto)
have "(λn. ereal (∫x. (c * u n x) / n ∂M)) ⇢ 0"
by (subst lim_explicit, metis * less_le_trans zero_less_one)
moreover have "(λn. ereal (∫x. (c * u n x) / n ∂M)) ⇢ subcocycle_avg_ereal (λn x. c * u n x)"
using subcocycle_int_tendsto_avg_ereal[OF subcocycle_cmult[OF assms]] by auto
ultimately have "subcocycle_avg_ereal (λn x. c * u n x) = 0"
using LIMSEQ_unique by blast
then show ?thesis using True by auto
next
case False
have int: "⋀n. integrable M (u n)" using assms unfolding subcocycle_def by auto
(* with c > 0, pull c out of the integrals and use continuity of ereal scaling *)
have "ereal (∫x. c * u n x / n ∂M) = c * ereal (∫x. u n x / n ∂M)" for n by auto
then have "(λn. c * ereal (∫x. u n x / n ∂M)) ⇢ subcocycle_avg_ereal (λn x. c * u n x)"
using subcocycle_int_tendsto_avg_ereal[OF subcocycle_cmult[OF assms]] by auto
moreover have "(λn. c * ereal (∫x. u n x / n ∂M)) ⇢ c * subcocycle_avg_ereal u"
apply (rule tendsto_mult_ereal) using False subcocycle_int_tendsto_avg_ereal[OF assms(1)] by auto
ultimately show ?thesis using LIMSEQ_unique by blast
qed
(* The asymptotic average of the max dominates the max of the averages: each of
   u and v is pointwise bounded by the max, so the normalized integrals compare,
   and the inequality passes to the limits. (Equality does not hold in general.) *)
lemma subcocycle_avg_ereal_max:
assumes "subcocycle u" "subcocycle v"
shows "subcocycle_avg_ereal (λn x. max (u n x) (v n x)) ≥ max (subcocycle_avg_ereal u) (subcocycle_avg_ereal v)"
proof (auto)
have int: "integrable M (u n)" "integrable M (v n)" for n using assms unfolding subcocycle_def by auto
have int2: "integrable M (λx. max (u n x) (v n x))" for n using integrable_max int by auto
(* comparison for u, then take limits on both sides *)
have "(∫x. u n x / n ∂M) ≤ (∫x. max (u n x) (v n x) / n ∂M)" for n
apply (rule integral_mono) using int int2 by (auto simp add: divide_simps)
then show "subcocycle_avg_ereal u ≤ subcocycle_avg_ereal (λn x. max (u n x) (v n x))"
using LIMSEQ_le[OF subcocycle_int_tendsto_avg_ereal[OF assms(1)]
subcocycle_int_tendsto_avg_ereal[OF subcocycle_max[OF assms]]] by auto
(* symmetric comparison for v *)
have "(∫x. v n x / n ∂M) ≤ (∫x. max (u n x) (v n x) / n ∂M)" for n
apply (rule integral_mono) using int int2 by (auto simp add: divide_simps)
then show "subcocycle_avg_ereal v ≤ subcocycle_avg_ereal (λn x. max (u n x) (v n x))"
using LIMSEQ_le[OF subcocycle_int_tendsto_avg_ereal[OF assms(2)]
subcocycle_int_tendsto_avg_ereal[OF subcocycle_max[OF assms]]] by auto
qed
text ‹For a Birkhoff sum, the average at each time is the same, equal to the average of the
function, so the asymptotic average is also equal to this common value.›
(* For the Birkhoff-sum subcocycle, every normalized integral already equals ∫ u
   (by measure preservation), so the asymptotic average is this common value. *)
lemma subcocycle_avg_ereal_birkhoff:
assumes "integrable M u"
shows "subcocycle_avg_ereal (birkhoff_sum u) = (∫x. u x ∂M)"
proof -
(* each term of the sequence is constant, equal to ∫ u *)
have *: "ereal (∫x. (birkhoff_sum u n x) / n ∂M) = (∫x. u x ∂M)" if "n>0" for n
using birkhoff_sum_integral(2)[OF assms] that by auto
have "(λn. ereal (∫x. (birkhoff_sum u n x) / n ∂M)) ⇢ (∫x. u x ∂M)"
by (subst lim_explicit, metis * less_le_trans zero_less_one)
moreover have "(λn. ereal (∫x. (birkhoff_sum u n x) / n ∂M)) ⇢ subcocycle_avg_ereal (birkhoff_sum u)"
using subcocycle_int_tendsto_avg_ereal[OF subcocycle_birkhoff[OF assms]] by auto
ultimately show ?thesis using LIMSEQ_unique by blast
qed
text ‹In nice situations, where one can avoid the use of ereal, the following
definition is more convenient. The kind of statements we are after is as follows: if the
ereal average is finite, then something holds, likely involving the real average.
In particular, we show in this setting what we have proved above under this new assumption:
convergence (in real numbers) of the average to the asymptotic average, as well as good behavior
under sum, scalar multiplication by positive numbers, max, formula for Birkhoff sums.›
(* Real-valued version of the asymptotic average; meaningful when the ereal
   average is finite (real_of_ereal maps ±∞ to 0 otherwise). *)
definition subcocycle_avg::"(nat ⇒ 'a ⇒ real) ⇒ real" where
"subcocycle_avg u = real_of_ereal(subcocycle_avg_ereal u)"
(* When the ereal average is > -∞ (it is always < ∞), it agrees with the embedding
   of its real-valued version. *)
lemma subcocycle_avg_real_ereal:
assumes "subcocycle_avg_ereal u > - ∞"
shows "subcocycle_avg_ereal u = ereal(subcocycle_avg u)"
unfolding subcocycle_avg_def using assms subcocycle_avg_finite[of u] ereal_real by auto
(* Real-valued convergence of the normalized integrals to the asymptotic average,
   under the finiteness assumption that makes the real average meaningful. *)
lemma subcocycle_int_tendsto_avg:
assumes "subcocycle u" "subcocycle_avg_ereal u > - ∞"
shows "(λn. (∫x. u n x / n ∂M)) ⇢ subcocycle_avg u"
using subcocycle_avg_real_ereal[OF assms(2)] subcocycle_int_tendsto_avg_ereal[OF assms(1)] by auto
(* Real-valued additivity: the sum of two subcocycles with finite averages has a
   finite average equal to the sum of the averages. *)
lemma subcocycle_avg_add:
assumes "subcocycle u" "subcocycle v" "subcocycle_avg_ereal u > - ∞" "subcocycle_avg_ereal v > - ∞"
shows "subcocycle_avg_ereal (λn x. u n x + v n x) > -∞"
"subcocycle_avg (λn x. u n x + v n x) = subcocycle_avg u + subcocycle_avg v"
using assms subcocycle_avg_finite real_of_ereal_add
unfolding subcocycle_avg_def subcocycle_avg_ereal_add[OF assms(1) assms(2)] by auto
(* Real-valued homogeneity: scaling by c ≥ 0 keeps the average finite and scales
   it by c. *)
lemma subcocycle_avg_cmult:
assumes "subcocycle u" "c ≥ (0::real)" "subcocycle_avg_ereal u > - ∞"
shows "subcocycle_avg_ereal (λn x. c * u n x) > - ∞"
"subcocycle_avg (λn x. c * u n x) = c * subcocycle_avg u"
using assms subcocycle_avg_finite unfolding subcocycle_avg_def subcocycle_avg_ereal_cmult[OF assms(1) assms(2)] by auto
(* Real-valued version of subcocycle_avg_ereal_max: the average of the max is
   finite and dominates the max of the (real) averages. *)
lemma subcocycle_avg_max:
assumes "subcocycle u" "subcocycle v" "subcocycle_avg_ereal u > - ∞" "subcocycle_avg_ereal v > - ∞"
shows "subcocycle_avg_ereal (λn x. max (u n x) (v n x)) > -∞"
"subcocycle_avg (λn x. max (u n x) (v n x)) ≥ max (subcocycle_avg u) (subcocycle_avg v)"
proof -
(* finiteness of the max average, from finiteness of the average of u *)
show *: "subcocycle_avg_ereal (λn x. max (u n x) (v n x)) > - ∞"
using assms(3) subcocycle_avg_ereal_max[OF assms(1) assms(2)] by auto
(* transfer the ereal inequality to the real embeddings *)
have "ereal (subcocycle_avg (λn x. max (u n x) (v n x))) ≥ max (ereal(subcocycle_avg u)) (ereal(subcocycle_avg v))"
using subcocycle_avg_real_ereal[OF assms(3)] subcocycle_avg_real_ereal[OF assms(4)]
subcocycle_avg_real_ereal[OF *] subcocycle_avg_ereal_max[OF assms(1) assms(2)] by auto
then show "subcocycle_avg (λn x. max (u n x) (v n x)) ≥ max (subcocycle_avg u) (subcocycle_avg v)"
by auto
qed
(* For a Birkhoff-sum subcocycle the real average is simply ∫ u; finiteness is
   immediate since the ereal average equals a real number. *)
lemma subcocycle_avg_birkhoff:
assumes "integrable M u"
shows "subcocycle_avg_ereal (birkhoff_sum u) > - ∞"
"subcocycle_avg (birkhoff_sum u) = (∫x. u x ∂M)"
unfolding subcocycle_avg_def subcocycle_avg_ereal_birkhoff[OF assms(1)] by auto
end
subsection ‹Almost sure convergence of subcocycles›
text ‹In this paragraph, we prove Kingman's theorem, i.e., the almost sure convergence of
subcocycles. Their limit is almost surely invariant. There is no really easy proof. The one we use
below is arguably the simplest known one, due to Steele (1989). The idea is to show that the limsup
of the subcocycle is bounded by the liminf (which is almost surely constant along trajectories), by
using subadditivity along time intervals where the liminf is almost reached, of length at most $N$.
For some points, the liminf takes a large time $>N$ to be reached. We neglect those times,
introducing an additional error that gets smaller with $N$, thanks to Birkhoff ergodic theorem
applied to the set of bad points. The error is most easily managed if the subcocycle is assumed to
be nonpositive, which one can assume in a first step. The general case is reduced to this one by
replacing $u_n$ with $u_n - S_n u_1 \leq 0$, and using Birkhoff theorem to control $S_n u_1$.›
context fmpt begin
text ‹First, as explained above, we prove the theorem for nonpositive subcocycles.›
lemma kingman_theorem_AE_aux1:
assumes "subcocycle u"
"⋀x. u 1 x ≤ 0"
shows "∃(g::'a⇒ereal). (g∈borel_measurable Invariants ∧ (∀x. g x < ∞) ∧ (AE x in M. (λn. u n x / n) ⇢ g x))"
proof -
define l where "l = (λx. liminf (λn. u n x / n))"
have u_meas [measurable]: "⋀n. u n ∈ borel_measurable M" using assms(1) unfolding subcocycle_def by auto
have l_meas [measurable]: "l ∈ borel_measurable M" unfolding l_def by auto
{
fix x assume *: "(λn. birkhoff_sum (u 1) n x / n) ⇢ real_cond_exp M Invariants (u 1) x"
then have "(λn. birkhoff_sum (u 1) n x / n) ⇢ ereal(real_cond_exp M Invariants (u 1) x)"
by auto
then have a: "liminf (λn. birkhoff_sum (u 1) n x / n) = ereal(real_cond_exp M Invariants (u 1) x)"
using lim_imp_Liminf by force
have "ereal(u n x / n) ≤ ereal(birkhoff_sum (u 1) n x / n)" if "n>0" for n
using subcocycle_bounded_by_birkhoff1[OF assms(1) that, of x] that by (simp add: divide_right_mono)
with eventually_mono[OF eventually_gt_at_top[of 0] this]
have "eventually (λn. ereal(u n x / n) ≤ ereal(birkhoff_sum (u 1) n x / n)) sequentially" by auto
then have "liminf (λn. u n x / n) ≤ liminf (λn. birkhoff_sum (u 1) n x / n)"
by (simp add: Liminf_mono)
then have "l x < ∞" unfolding l_def using a by auto
}
then have "AE x in M. l x < ∞"
using birkhoff_theorem_AE_nonergodic[of "u 1"] subcocycle_def assms(1) by auto
have l_dec: "l x ≤ l (T x)" for x
proof -
have "l x = liminf (λn. ereal ((u (n+1) x)/(n+1)))"
unfolding l_def by (rule liminf_shift[of "λn. ereal (u n x / real n)", symmetric])
also have "... ≤ liminf (λn. ereal((u 1 x)/(n+1)) + ereal((u n (T x))/(n+1)))"
proof (rule Liminf_mono[OF eventuallyI])
fix n
have "u (1+n) x ≤ u 1 x + u n ((T^^1) x)" using assms(1) unfolding subcocycle_def by blast
then have "u (n+1) x ≤ u 1 x + u n (T x)" by auto
then have "(u (n+1) x)/(n+1) ≤ (u 1 x)/(n+1) + (u n (T x))/(n+1)"
by (metis add_divide_distrib divide_right_mono of_nat_0_le_iff)
then show "ereal ((u (n+1) x)/(n+1)) ≤ ereal((u 1 x)/(n+1)) + ereal((u n (T x))/(n+1))" by auto
qed
also have "... = 0 + liminf(λn. ereal((u n (T x))/(n+1)))"
proof (rule ereal_liminf_lim_add[of "λn. ereal((u 1 x)/real(n+1))" 0 "λn. ereal((u n (T x))/(n+1))"])
have "(λn. ereal((u 1 x)*(1/real(n+1)))) ⇢ ereal((u 1 x) * 0)"
by (intro tendsto_intros LIMSEQ_ignore_initial_segment)
then show "(λn. ereal((u 1 x)/real(n+1))) ⇢ 0" by (simp add: zero_ereal_def)
qed (simp)
also have "... = 1 * liminf(λn. ereal((u n (T x))/(n+1)))" by simp
also have "... = liminf(λn. (n+1)/n * ereal((u n (T x))/(n+1)))"
proof (rule ereal_liminf_lim_mult[symmetric])
have "real (n+1) / real n = 1 + 1/real n" if "n>0" for n by (simp add: divide_simps mult.commute that)
with eventually_mono[OF eventually_gt_at_top[of "0::nat"] this]
have "eventually (λn. real (n+1) / real n = 1 + 1/real n) sequentially" by simp
moreover have "(λn. 1 + 1/real n) ⇢ 1 + 0"
by (intro tendsto_intros)
ultimately have "(λn. real (n+1) / real n) ⇢ 1" using Lim_transform_eventually by (simp add: filterlim_cong)
then show "(λn. ereal(real (n+1) / real n)) ⇢ 1" by (simp add: one_ereal_def)
qed (auto)
also have "... = l (T x)" unfolding l_def by auto
finally show "l x ≤ l (T x)" by simp
qed
have "AE x in M. l (T x) = l x"
apply (rule AE_increasing_then_invariant) using l_dec by auto
then obtain g0 where g0: "g0 ∈ borel_measurable Invariants" "AE x in M. l x = g0 x"
using Invariants_quasi_Invariants_functions[OF l_meas] by auto
define g where "g = (λx. if g0 x = ∞ then 0 else g0 x)"
have g: "g ∈ borel_measurable Invariants" "AE x in M. g x = l x"
unfolding g_def using g0(1) ‹AE x in M. l x = g0 x› ‹AE x in M. l x < ∞› by auto
have [measurable]: "g ∈ borel_measurable M" using g(1) Invariants_measurable_func by blast
have "⋀x. g x < ∞" unfolding g_def by auto
define A where "A = {x ∈ space M. l x < ∞ ∧ (∀n. l ((T^^n) x) = g ((T^^n) x))}"
have A_meas [measurable]: "A ∈ sets M" unfolding A_def by auto
have "AE x in M. x ∈ A" unfolding A_def using T_AE_iterates[OF g(2)] ‹AE x in M. l x < ∞› by auto
then have "space M - A ∈ null_sets M" by (simp add: AE_iff_null set_diff_eq)
have l_inv: "l((T^^n) x) = l x" if "x ∈ A" for x n
proof -
have "l((T^^n) x) = g((T^^n) x)" using ‹ x ∈ A › unfolding A_def by blast
also have "... = g x" using g(1) A_def Invariants_func_is_invariant_n that by blast
also have "... = g((T^^0) x)" by auto
also have "... = l((T^^0) x)" using ‹ x ∈ A › unfolding A_def by (metis (mono_tags, lifting) mem_Collect_eq)
finally show ?thesis by auto
qed
define F where "F = (λ K e x. real_of_ereal(max (l x) (-ereal K)) + e)"
have F_meas [measurable]: "F K e ∈ borel_measurable M" for K e unfolding F_def by auto
define B where "B = (λN K e. {x ∈ A. ∃n∈{1..N}. u n x - n * F K e x < 0})"
have B_meas [measurable]: "B N K e ∈ sets M" for N K e unfolding B_def by (measurable)
define I where "I = (λN K e x. (indicator (- B N K e) x)::real)"
have I_meas [measurable]: "I N K e ∈ borel_measurable M" for N K e unfolding I_def by auto
have I_int: "integrable M (I N K e)" for N K e
unfolding I_def apply (subst integrable_cong[where ?g = "indicator (space M - B N K e)::_ ⇒ real"], auto)
by (auto split: split_indicator simp: less_top[symmetric])
have main: "AE x in M. limsup (λn. u n x / n) ≤ F K e x + abs(F K e x) * ereal(real_cond_exp M Invariants (I N K e) x)"
if "N>(1::nat)" "K>(0::real)" "e>(0::real)" for N K e
proof -
let ?B = "B N K e" and ?I = "I N K e" and ?F = "F K e"
define t where "t = (λx. if x ∈ ?B then Min {n∈{1..N}. u n x - n * ?F x < 0} else 1)"
have [measurable]: "t ∈ measurable M (count_space UNIV)" unfolding t_def by measurable
have t1: "t x ∈ {1..N}" for x
proof (cases "x ∈ ?B")
case False
then have "t x = 1" by (simp add: t_def)
then show ?thesis using ‹N>1›by auto
next
case True
let ?A = "{n∈{1..N}. u n x - n * ?F x < 0}"
have "t x = Min ?A" using True by (simp add: t_def)
moreover have "Min ?A ∈ ?A" apply (rule Min_in, simp) using True B_def by blast
ultimately show ?thesis by auto
qed
have bound1: "u (t x) x ≤ t x * ?F x + birkhoff_sum ?I (t x) x * abs(?F x)" for x
proof (cases "x ∈ ?B")
case True
let ?A = "{n∈{1..N}. u n x - n * F K e x < 0}"
have "t x = Min ?A" using True by (simp add: t_def)
moreover have "Min ?A ∈ ?A" apply (rule Min_in, simp) using True B_def by blast
ultimately have "u (t x) x ≤ (t x) * ?F x" by auto
moreover have "0 ≤ birkhoff_sum ?I (t x) x * abs(?F x)" unfolding birkhoff_sum_def I_def by (simp add: sum_nonneg)
ultimately show ?thesis by auto
next
case False
then have "0 ≤ ?F x + ?I x * abs(?F x)" unfolding I_def by auto
then have "u 1 x ≤ ?F x + ?I x * abs(?F x)" using assms(2)[of x] by auto
moreover have "t x = 1" unfolding t_def using False by auto
ultimately show ?thesis by auto
qed
define TB where "TB = (λx. (T^^(t x)) x)"
have [measurable]: "TB ∈ measurable M M" unfolding TB_def by auto
define S where "S = (λn x. (∑i<n. t((TB^^i) x)))"
have [measurable]: "S n ∈ measurable M (count_space UNIV)" for n unfolding S_def by measurable
have TB_pow: "(TB^^n) x = (T^^(S n x)) x" for n x
unfolding S_def TB_def
by (induction n, auto, metis (mono_tags, lifting) add.commute funpow_add o_apply)
have uS: "u (S n x) x ≤ (S n x) * ?F x + birkhoff_sum ?I (S n x) x * abs(?F x)" if "x ∈ A" "n>0" for x n
using ‹n > 0› proof (induction rule: ind_from_1)
case 1
show ?case unfolding S_def using bound1 by auto
next
case (Suc n)
have *: "?F((TB^^n) x) = ?F x" apply (subst TB_pow) unfolding F_def using l_inv[OF ‹x∈A›] by auto
have **: "S n x + t ((TB^^n) x) = S (Suc n) x" unfolding S_def by auto
have "u (S (Suc n) x) x = u (S n x + t((TB^^n) x)) x" unfolding S_def by auto
also have "... ≤ u (S n x) x + u (t((TB^^n) x)) ((T^^(S n x)) x)"
using assms(1) unfolding subcocycle_def by auto
also have "... ≤ u (S n x) x + u (t((TB^^n) x)) ((TB^^n) x)"
using TB_pow by auto
also have "... ≤ (S n x) * ?F x + birkhoff_sum ?I (S n x) x * abs(?F x) +
t ((TB^^n) x) * ?F ((TB^^n) x) + birkhoff_sum ?I (t ((TB^^n) x)) ((TB^^n) x) * abs(?F ((TB^^n) x))"
using Suc bound1[of "((TB^^n) x)"] by auto
also have "... = (S n x) * ?F x + birkhoff_sum ?I (S n x) x * abs(?F x) +
t ((TB^^n) x) * ?F x + birkhoff_sum ?I (t ((TB^^n) x)) ((T^^(S n x)) x) * abs(?F x)"
using * TB_pow by auto
also have "... = (real(S n x) + t ((TB^^n) x)) * ?F x +
(birkhoff_sum ?I (S n x) x + birkhoff_sum ?I (t ((TB^^n) x)) ((T^^(S n x)) x)) * abs(?F x)"
by (simp add: mult.commute ring_class.ring_distribs(1))
also have "... = (S n x + t ((TB^^n) x)) * ?F x +
(birkhoff_sum ?I (S n x) x + birkhoff_sum ?I (t ((TB^^n) x)) ((T^^(S n x)) x)) * abs(?F x)"
by simp
also have "... = (S (Suc n) x) * ?F x + birkhoff_sum ?I (S (Suc n) x) x * abs(?F x)"
by (subst birkhoff_sum_cocycle[symmetric], subst **, subst **, simp)
finally show ?case by simp
qed
have un: "u n x ≤ n * ?F x + N * abs(?F x) + birkhoff_sum ?I n x * abs(?F x)" if "x ∈ A" "n>N" for x n
proof -
let ?A = "{i. S i x > n}"
let ?iA = "Inf ?A"
have "n < (∑i<n + 1. 1)" by auto
also have "... ≤ S (n+1) x" unfolding S_def apply (rule sum_mono) using t1 by auto
finally have "?A ≠ {}" by blast
then have "?iA ∈ ?A" by (meson Inf_nat_def1)
moreover have "0 ∉ ?A" unfolding S_def by auto
ultimately have "?iA ≠ 0" by fastforce
define j where "j = ?iA - 1"
then have "j < ?iA" using ‹?iA ≠ 0› by auto
then have "j ∉ ?A" by (meson bdd_below_def cInf_lower le0 not_less)
then have "S j x ≤ n" by auto
define k where "k = n - S j x"
have "n = S j x + k" unfolding k_def using ‹S j x ≤ n› by auto
have "n < S (j+1) x" unfolding j_def using ‹?iA ≠ 0› ‹?iA ∈ ?A› by auto
also have "... = S j x + t((TB^^j) x)" unfolding S_def by auto
also have "... ≤ S j x + N" using t1 by auto
finally have "k ≤ N" unfolding k_def using ‹n > N› by auto
then have "S j x > 0" unfolding k_def using ‹n > N› by auto
then have "j > 0" unfolding S_def using not_gr0 by fastforce
have "birkhoff_sum ?I (S j x) x ≤ birkhoff_sum ?I n x"
unfolding birkhoff_sum_def I_def using ‹S j x ≤ n›
by (metis finite_Collect_less_nat indicator_pos_le lessThan_def lessThan_subset_iff sum_mono2)
have "u n x ≤ u (S j x) x"
proof (cases "k = 0")
case True
show ?thesis using True unfolding k_def using ‹S j x ≤ n› by auto
next
case False
then have "k > 0" by simp
have "u k ((T^^(S j x)) x) ≤ birkhoff_sum (u 1) k ((T ^^ S j x) x)"
using subcocycle_bounded_by_birkhoff1[OF assms(1) ‹k>0›, of "(T^^(S j x)) x"] by simp
also have "... ≤ 0" unfolding birkhoff_sum_def using sum_mono assms(2) by (simp add: sum_nonpos)
also have "u n x ≤ u (S j x) x + u k ((T^^(S j x)) x)"
apply (subst ‹n = S j x + k›) using assms(1) subcocycle_def by auto
ultimately show ?thesis by auto
qed
also have "... ≤ (S j x) * ?F x + birkhoff_sum ?I (S j x) x * abs(?F x)"
using uS[OF ‹x ∈ A› ‹j>0›] by simp
also have "... ≤ (S j x) * ?F x + birkhoff_sum ?I n x * abs(?F x)"
using ‹birkhoff_sum ?I (S j x) x ≤ birkhoff_sum ?I n x› by (simp add: mult_right_mono)
also have "... = n * ?F x - k * ?F x + birkhoff_sum ?I n x * abs(?F x)"
by (metis ‹n = S j x + k› add_diff_cancel_right' le_add2 left_diff_distrib' of_nat_diff)
also have "... ≤ n * ?F x + k * abs(?F x) + birkhoff_sum ?I n x * abs(?F x)"
by (auto, metis abs_ge_minus_self abs_mult abs_of_nat)
also have "... ≤ n * ?F x + N * abs(?F x) + birkhoff_sum ?I n x * abs(?F x)"
using ‹k ≤ N› by (simp add: mult_right_mono)
finally show ?thesis by simp
qed
have "limsup (λn. u n x / n) ≤ ?F x + limsup (λn. abs(?F x) * ereal(birkhoff_sum ?I n x / n))" if "x ∈ A" for x
proof -
have "(λn. ereal(?F x + N * abs(?F x) * (1 / n))) ⇢ ereal(?F x + N * abs (?F x) * 0)"
by (intro tendsto_intros)
then have *: "limsup (λn. ?F x + N * abs(?F x)/n) = ?F x"
using sequentially_bot tendsto_iff_Liminf_eq_Limsup by force
{
fix n assume "n > N"
have "u n x / real n ≤ ?F x + N * abs(?F x) / n + abs(?F x) * birkhoff_sum ?I n x / n"
using un[OF ‹x ∈ A› ‹n > N›] using ‹n>N› by (auto simp add: divide_simps mult.commute)
then have "ereal(u n x/n) ≤ ereal(?F x + N * abs(?F x) / n) + abs(?F x) * ereal(birkhoff_sum ?I n x / n)"
by auto
}
then have "eventually (λn. ereal(u n x / n) ≤ ereal(?F x + N * abs(?F x) / n) + abs(?F x) * ereal(birkhoff_sum ?I n x / n)) sequentially"
using eventually_mono[OF eventually_gt_at_top[of N]] by auto
with Limsup_mono[OF this]
have "limsup (λn. u n x / n) ≤ limsup (λn. ereal(?F x + N * abs(?F x) / n) + abs(?F x) * ereal(birkhoff_sum ?I n x / n))"
by auto
also have "... ≤ limsup (λn. ?F x + N * abs(?F x) / n) + limsup (λn. abs(?F x) * ereal(birkhoff_sum ?I n x / n))"
by (rule ereal_limsup_add_mono)
also have "... = ?F x + limsup (λn. abs(?F x) * ereal(birkhoff_sum ?I n x / n))"
using * by auto
finally show ?thesis by auto
qed
then have *: "AE x in M. limsup (λn. u n x / n) ≤ ?F x + limsup (λn. abs(?F x) * ereal(birkhoff_sum ?I n x / n))"
using ‹AE x in M. x ∈ A› by auto
{
fix x assume H: "(λn. birkhoff_sum ?I n x / n) ⇢ real_cond_exp M Invariants ?I x"
have "(λn. abs(?F x) * ereal(birkhoff_sum ?I n x / n)) ⇢ abs(?F x) * ereal(real_cond_exp M Invariants ?I x)"
by (rule tendsto_mult_ereal, auto simp add: H)
then have "limsup (λn. abs(?F x) * ereal(birkhoff_sum ?I n x / n)) = abs(?F x) * ereal(real_cond_exp M Invariants ?I x)"
using sequentially_bot tendsto_iff_Liminf_eq_Limsup by blast
}
moreover have "AE x in M. (λn. birkhoff_sum ?I n x / n) ⇢ real_cond_exp M Invariants ?I x"
by (rule birkhoff_theorem_AE_nonergodic[OF I_int])
ultimately have "AE x in M. limsup (λn. abs(?F x) * ereal(birkhoff_sum ?I n x / n)) = abs(?F x) * ereal(real_cond_exp M Invariants ?I x)"
by auto
then show ?thesis using * by auto
qed
have bound2: "AE x in M. limsup (λn. u n x / n) ≤ F K e x" if "K > 0" "e > 0" for K e
proof -
define C where "C = (λN. A - B N K e)"
have C_meas [measurable]: "⋀N. C N ∈ sets M" unfolding C_def by auto
{
fix x assume "x ∈ A"
have "F K e x > l x" using ‹x ∈ A› ‹e > 0› unfolding F_def A_def
by (cases "l x", auto, metis add.commute ereal_max less_add_same_cancel2 max_less_iff_conj real_of_ereal.simps(1))
then have "∃n>0. ereal(u n x / n) < F K e x" unfolding l_def using liminf_upper_bound by fastforce
then obtain n where "n>0" "ereal(u n x / n) < F K e x" by auto
then have "u n x - n * F K e x < 0" by (simp add: divide_less_eq mult.commute)
then have "x ∉ C n" unfolding C_def B_def using ‹x ∈ A› ‹n>0› by auto
then have "x ∉ (⋂n. C n)" by auto
}
then have "(⋂n. C n) = {}" unfolding C_def by auto
then have *: "0 = measure M (⋂n. C n)" by auto
have "(λn. measure M (C n)) ⇢ 0"
apply (subst *, rule finite_Lim_measure_decseq, auto) unfolding C_def B_def decseq_def by auto
moreover have "measure M (C n) = (∫x. norm(real_cond_exp M Invariants (I n K e) x) ∂M)" for n
proof -
have *: "AE x in M. 0 ≤ real_cond_exp M Invariants (I n K e) x"
apply (rule real_cond_exp_pos, auto) unfolding I_def by auto
have "measure M (C n) = (∫x. indicator (C n) x ∂M)"
by auto
also have "... = (∫x. I n K e x ∂M)"
apply (rule integral_cong_AE, auto)
unfolding C_def I_def indicator_def using ‹AE x in M. x ∈ A› by auto
also have "... = (∫x. real_cond_exp M Invariants (I n K e) x ∂M)"
by (rule real_cond_exp_int(2)[symmetric, OF I_int])
also have "... = (∫x. norm(real_cond_exp M Invariants (I n K e) x) ∂M)"
apply (rule integral_cong_AE, auto) using * by auto
finally show ?thesis by auto
qed
ultimately have *: "(λn. (∫x. norm(real_cond_exp M Invariants (I n K e) x) ∂M)) ⇢ 0" by simp
have "∃r. strict_mono r ∧ (AE x in M. (λn. real_cond_exp M Invariants (I (r n) K e) x) ⇢ 0)"
apply (rule tendsto_L1_AE_subseq) using * real_cond_exp_int[OF I_int] by auto
then obtain r where "strict_mono r" "AE x in M. (λn. real_cond_exp M Invariants (I (r n) K e) x) ⇢ 0"
by auto
moreover have "AE x in M. ∀N ∈ {1<..}. limsup (λn. u n x / n) ≤ F K e x + abs(F K e x) * ereal(real_cond_exp M Invariants (I N K e) x)"
apply (rule AE_ball_countable') using main[OF _ ‹K>0› ‹e>0›] by auto
moreover
{
fix x assume H: "(λn. real_cond_exp M Invariants (I (r n) K e) x) ⇢ 0"
"⋀N. N > 1 ⟹ limsup (λn. u n x / n) ≤ F K e x + abs(F K e x) * ereal(real_cond_exp M Invariants (I N K e) x)"
have 1: "eventually (λN. limsup (λn. u n x / n) ≤ F K e x + abs(F K e x) * ereal(real_cond_exp M Invariants (I (r N) K e) x)) sequentially"
apply (rule eventually_mono[OF eventually_gt_at_top[of 1] H(2)])
using ‹strict_mono r› less_le_trans seq_suble by blast
have 2: "(λN. F K e x + (abs(F K e x) * ereal(real_cond_exp M Invariants (I (r N) K e) x))) ⇢ ereal(F K e x) + (abs(F K e x) * ereal 0)"
by (intro tendsto_intros) (auto simp add: H(1))
have "limsup (λn. u n x / n) ≤ F K e x"
apply (rule LIMSEQ_le_const) using 1 2 by (auto simp add: eventually_at_top_linorder)
}
ultimately show "AE x in M. limsup (λn. u n x / n) ≤ F K e x" by auto
qed
have "AE x in M. limsup (λn. u n x / n) ≤ real_of_ereal(max (l x) (-ereal K))" if "K>(0::nat)" for K
apply (rule AE_upper_bound_inf_ereal) using bound2 ‹K>0› unfolding F_def by auto
then have "AE x in M. ∀K∈{(0::nat)<..}. limsup (λn. u n x / n) ≤ real_of_ereal(max (l x) (-ereal K))"
by (rule AE_ball_countable', auto)
moreover have "(λn. u n x / n) ⇢ l x"
if H: "∀K∈{(0::nat)<..}. limsup (λn. u n x / n) ≤ real_of_ereal(max (l x) (-ereal K))" for x
proof -
have "limsup (λn. u n x / n) ≤ l x"
proof (cases "l x = ∞")
case False
then have "(λK. real_of_ereal(max (l x) (-ereal K))) ⇢ l x"
using ereal_truncation_real_bottom by auto
moreover have "eventually (λK. limsup (λn. u n x / n) ≤ real_of_ereal(max (l x) (-ereal K))) sequentially"
using H by (metis (no_types, lifting) eventually_at_top_linorder eventually_gt_at_top greaterThan_iff)
ultimately show "limsup (λn. u n x / n) ≤ l x" using Lim_bounded2 eventually_sequentially by auto
qed (simp)
then have "limsup (λn. ereal (u n x / real n)) = l x"
using Liminf_le_Limsup l_def eq_iff sequentially_bot by blast
then show "(λn. u n x / n) ⇢ l x"
by (simp add: l_def tendsto_iff_Liminf_eq_Limsup)
qed
ultimately have "AE x in M. (λn. u n x / n) ⇢ l x" by auto
then have "AE x in M. (λn. u n x / n) ⇢ g x" using g(2) by auto
then show "∃(g::'a⇒ereal). (g∈borel_measurable Invariants ∧ (∀x. g x < ∞) ∧ (AE x in M. (λn. u n x / n) ⇢ g x))"
using g(1) ‹⋀x. g x < ∞› by auto
qed
text ‹We deduce it for general subcocycles, by reducing to nonpositive subcocycles by subtracting
the Birkhoff sum of $u_1$ (for which the convergence follows from Birkhoff theorem).›
(* Kingman's theorem, a.e. version, for a general subcocycle: reduce to the
   special case handled in kingman_theorem_AE_aux1 by subtracting the Birkhoff
   sums of u 1 (which kills the first-level term: v 1 x = u 1 x - u 1 x = 0),
   then add back the Birkhoff limit, given by Birkhoff's theorem as the
   conditional expectation of u 1 w.r.t. the invariant sigma-algebra. *)
theorem kingman_theorem_AE_aux2:
assumes "subcocycle u"
shows "∃(g::'a⇒ereal). (g∈borel_measurable Invariants ∧ (∀x. g x < ∞) ∧ (AE x in M. (λn. u n x / n) ⇢ g x))"
proof -
(* Corrected subcocycle: a subcocycle as the sum of the subcocycle u and the
   (sub)cocycle of Birkhoff sums of -u 1. *)
define v where "v = (λn x. u n x + birkhoff_sum (λx. - u 1 x) n x)"
have "subcocycle v" unfolding v_def
apply (rule subcocycle_add[OF assms], rule subcocycle_birkhoff)
using assms unfolding subcocycle_def by auto
(* Apply the special case to v; its side condition is discharged by auto after
   unfolding v_def (presumably nonpositivity of v 1 — which vanishes). *)
have "∃(gv::'a⇒ereal). (gv∈borel_measurable Invariants ∧ (∀x. gv x < ∞) ∧ (AE x in M. (λn. v n x / n) ⇢ gv x))"
apply (rule kingman_theorem_AE_aux1[OF ‹subcocycle v›]) unfolding v_def by auto
then obtain gv where gv: "gv ∈ borel_measurable Invariants" "AE x in M. (λn. v n x / n) ⇢ (gv x::ereal)" "⋀x. gv x < ∞"
by blast
(* Candidate limit for u: limit of v plus the Birkhoff limit of u 1.
   It inherits Invariants-measurability and finiteness from gv. *)
define g where "g = (λx. gv x + ereal(real_cond_exp M Invariants (u 1) x))"
have g_meas: "g ∈ borel_measurable Invariants" unfolding g_def using gv(1) by auto
have g_fin: "⋀x. g x < ∞" unfolding g_def using gv(3) by auto
have "AE x in M. (λn. birkhoff_sum (u 1) n x / n) ⇢ real_cond_exp M Invariants (u 1) x"
apply (rule birkhoff_theorem_AE_nonergodic) using assms unfolding subcocycle_def by auto
moreover
{
(* Pointwise argument, on the a.e. set where both convergences hold. *)
fix x assume H: "(λn. v n x / n) ⇢ (gv x)"
"(λn. birkhoff_sum (u 1) n x / n) ⇢ real_cond_exp M Invariants (u 1) x"
then have "(λn. ereal(birkhoff_sum (u 1) n x / n)) ⇢ ereal(real_cond_exp M Invariants (u 1) x)"
by auto
{
fix n
(* By construction u n = v n + birkhoff_sum (u 1) n, so the averages split. *)
have "u n x = v n x + birkhoff_sum (u 1) n x"
unfolding v_def birkhoff_sum_def apply auto by (simp add: sum_negf)
then have "u n x / n = v n x / n + birkhoff_sum (u 1) n x / n" by (simp add: add_divide_distrib)
then have "ereal(u n x / n) = ereal(v n x / n) + ereal(birkhoff_sum (u 1) n x / n)"
by auto
} note * = this
(* Limits add (no ∞ - ∞ issue: the Birkhoff part is a real number). *)
have "(λn. ereal(u n x / n)) ⇢ g x" unfolding * g_def
apply (intro tendsto_intros) using H by auto
}
ultimately have "AE x in M. (λn. ereal(u n x / n)) ⇢ g x" using gv(2) by auto
then show ?thesis using g_meas g_fin by blast
qed
text ‹For applications, it is convenient to have a limit which is really measurable with respect
to the invariant sigma algebra and does not come from a hard to use abstract existence statement.
Hence we introduce the following definition for the would-be limit -- Kingman's theorem shows that
it is indeed a limit.
We introduce the definition for any function, not only subcocycles, but it will only be usable for
subcocycles. We introduce an if clause in the definition so that the limit is always measurable,
even when $u$ is not a subcocycle and there is no convergence.›
(* The would-be ereal-valued limit of u n x / n. Defined by Hilbert choice over
   the existence statement proved in Kingman's theorem; the if-clause guarantees
   the definition is total and measurable even when u is not a subcocycle (then
   the limit defaults to the constant 0). *)
definition subcocycle_lim_ereal::"(nat ⇒ 'a ⇒ real) ⇒ ('a ⇒ ereal)"
where "subcocycle_lim_ereal u = (
if (∃(g::'a⇒ereal). (g∈borel_measurable Invariants ∧
(∀x. g x < ∞) ∧ (AE x in M. (λn. u n x / n) ⇢ g x)))
then (SOME (g::'a⇒ereal). g∈borel_measurable Invariants ∧
(∀x. g x < ∞) ∧ (AE x in M. (λn. u n x / n) ⇢ g x))
else (λ_. 0))"
(* Real-valued version of the subcocycle limit; note real_of_ereal maps the
   infinite values of subcocycle_lim_ereal to 0. *)
definition subcocycle_lim::"(nat ⇒ 'a ⇒ real) ⇒ ('a ⇒ real)"
where "subcocycle_lim u = (λx. real_of_ereal(subcocycle_lim_ereal u x))"
(* Both limits are measurable w.r.t. the invariant sigma-algebra, for ANY u:
   this is exactly what the if-clause in the definition was designed for. *)
lemma subcocycle_lim_meas_Inv [measurable]:
"subcocycle_lim_ereal u ∈ borel_measurable Invariants"
"subcocycle_lim u ∈ borel_measurable Invariants"
proof -
show "subcocycle_lim_ereal u ∈ borel_measurable Invariants"
proof (cases "∃(g::'a⇒ereal). (g∈borel_measurable Invariants ∧ (∀x. g x < ∞) ∧ (AE x in M. (λn. u n x / n) ⇢ g x))")
case True
(* The SOME-witness satisfies the predicate, hence in particular measurability. *)
then have "subcocycle_lim_ereal u = (SOME (g::'a⇒ereal). g∈borel_measurable Invariants ∧
(∀x. g x < ∞) ∧ (AE x in M. (λn. u n x / n) ⇢ g x))"
unfolding subcocycle_lim_ereal_def by auto
then show ?thesis using someI_ex[OF True] by auto
next
case False
(* Degenerate branch: the limit is the constant 0, trivially measurable. *)
then have "subcocycle_lim_ereal u = (λ_. 0)" unfolding subcocycle_lim_ereal_def by auto
then show ?thesis by auto
qed
then show "subcocycle_lim u ∈ borel_measurable Invariants" unfolding subcocycle_lim_def by auto
qed
(* Measurability w.r.t. M follows from Invariants-measurability since
   Invariants is a sub-sigma-algebra of M (via Invariants_measurable_func). *)
lemma subcocycle_lim_meas [measurable]:
"subcocycle_lim_ereal u ∈ borel_measurable M"
"subcocycle_lim u ∈ borel_measurable M"
using subcocycle_lim_meas_Inv Invariants_measurable_func apply blast
using subcocycle_lim_meas_Inv Invariants_measurable_func by blast
(* The ereal limit is never +∞ (it may be -∞): either the SOME-witness
   satisfies ∀x. g x < ∞ by construction, or the limit is the constant 0. *)
lemma subcocycle_lim_ereal_not_PInf:
"subcocycle_lim_ereal u x < ∞"
proof (cases "∃(g::'a⇒ereal). (g∈borel_measurable Invariants ∧ (∀x. g x < ∞) ∧ (AE x in M. (λn. u n x / n) ⇢ g x))")
case True
then have "subcocycle_lim_ereal u = (SOME (g::'a⇒ereal). g∈borel_measurable Invariants ∧
(∀x. g x < ∞) ∧ (AE x in M. (λn. u n x / n) ⇢ g x))"
unfolding subcocycle_lim_ereal_def by auto
then show ?thesis using someI_ex[OF True] by auto
next
case False
then have "subcocycle_lim_ereal u = (λ_. 0)" unfolding subcocycle_lim_ereal_def by auto
then show ?thesis by auto
qed
text ‹We reformulate the subadditive ergodic theorem of Kingman with this definition.
From this point on, the technical definition of \verb+subcocycle_lim_ereal+ will never be used, only
the following property will be relevant.›
(* Kingman's subadditive ergodic theorem, a.e. version, stated with the
   concrete limit subcocycle_lim_ereal: the existence statement of aux2
   triggers the then-branch of the definition, and the SOME-witness has the
   claimed convergence property. *)
theorem kingman_theorem_AE_nonergodic_ereal:
assumes "subcocycle u"
shows "AE x in M. (λn. u n x / n) ⇢ subcocycle_lim_ereal u x"
proof -
have *: "∃(g::'a⇒ereal). (g∈borel_measurable Invariants ∧ (∀x. g x < ∞) ∧ (AE x in M. (λn. u n x / n) ⇢ g x))"
using kingman_theorem_AE_aux2[OF assms] by auto
then have "subcocycle_lim_ereal u = (SOME (g::'a⇒ereal). g∈borel_measurable Invariants ∧
(∀x. g x < ∞) ∧ (AE x in M. (λn. u n x / n) ⇢ g x))"
unfolding subcocycle_lim_ereal_def by auto
then show ?thesis using someI_ex[OF *] by auto
qed
text ‹The subcocycle limit behaves well under addition, multiplication by a positive scalar,
max, and is simply the conditional expectation with respect to invariants for Birkhoff sums,
thanks to Birkhoff theorem.›
(* Additivity of subcocycle limits (a.e.): on the full-measure set where all
   three convergences hold, the limit of the sum is the sum of the limits, and
   limits are unique. The ereal addition of limits is legitimate because
   neither summand can be +∞ (subcocycle_lim_ereal_not_PInf), so -∞ + ∞
   cannot occur. *)
lemma subcocycle_lim_ereal_add:
assumes "subcocycle u" "subcocycle v"
shows "AE x in M. subcocycle_lim_ereal (λn x. u n x + v n x) x = subcocycle_lim_ereal u x + subcocycle_lim_ereal v x"
proof -
have "AE x in M. (λn. (u n x + v n x)/n) ⇢ subcocycle_lim_ereal (λn x. u n x + v n x) x"
by (rule kingman_theorem_AE_nonergodic_ereal[OF subcocycle_add[OF assms]])
moreover have "AE x in M. (λn. u n x / n) ⇢ subcocycle_lim_ereal u x"
by (rule kingman_theorem_AE_nonergodic_ereal[OF assms(1)])
moreover have "AE x in M. (λn. v n x / n) ⇢ subcocycle_lim_ereal v x"
by (rule kingman_theorem_AE_nonergodic_ereal[OF assms(2)])
moreover
{
fix x assume H: "(λn. (u n x + v n x)/n) ⇢ subcocycle_lim_ereal (λn x. u n x + v n x) x"
"(λn. u n x / n) ⇢ subcocycle_lim_ereal u x"
"(λn. v n x / n) ⇢ subcocycle_lim_ereal v x"
have *: "(u n x + v n x)/n = ereal (u n x / n) + (v n x / n)" for n
by (simp add: add_divide_distrib)
have "(λn. (u n x + v n x)/n) ⇢ subcocycle_lim_ereal u x + subcocycle_lim_ereal v x"
unfolding * apply (intro tendsto_intros H(2) H(3)) using subcocycle_lim_ereal_not_PInf by auto
(* Two limits of the same sequence must agree. *)
then have "subcocycle_lim_ereal (λn x. u n x + v n x) x = subcocycle_lim_ereal u x + subcocycle_lim_ereal v x"
using H(1) by (simp add: LIMSEQ_unique)
}
ultimately show ?thesis by auto
qed
(* Homogeneity of subcocycle limits under multiplication by a nonnegative
   scalar (nonnegativity is needed for c * u to remain a subcocycle). Same
   uniqueness-of-limits scheme as subcocycle_lim_ereal_add. *)
lemma subcocycle_lim_ereal_cmult:
assumes "subcocycle u" "c≥(0::real)"
shows "AE x in M. subcocycle_lim_ereal (λn x. c * u n x) x = c * subcocycle_lim_ereal u x"
proof -
have "AE x in M. (λn. (c * u n x)/n) ⇢ subcocycle_lim_ereal (λn x. c * u n x) x"
by (rule kingman_theorem_AE_nonergodic_ereal[OF subcocycle_cmult[OF assms]])
moreover have "AE x in M. (λn. u n x / n) ⇢ subcocycle_lim_ereal u x"
by (rule kingman_theorem_AE_nonergodic_ereal[OF assms(1)])
moreover
{
fix x assume H: "(λn. (c * u n x)/n) ⇢ subcocycle_lim_ereal (λn x. c * u n x) x"
"(λn. u n x / n) ⇢ subcocycle_lim_ereal u x"
have "(λn. c * ereal (u n x / n)) ⇢ c * subcocycle_lim_ereal u x"
by (rule tendsto_cmult_ereal[OF _ H(2)], auto)
then have "subcocycle_lim_ereal (λn x. c * u n x) x = c * subcocycle_lim_ereal u x"
using H(1) by (simp add: LIMSEQ_unique)
}
ultimately show ?thesis by auto
qed
(* The subcocycle limit of a pointwise max is the max of the limits (a.e.):
   max is continuous, and limits of a sequence are unique. *)
lemma subcocycle_lim_ereal_max:
assumes "subcocycle u" "subcocycle v"
shows "AE x in M. subcocycle_lim_ereal (λn x. max (u n x) (v n x)) x
= max (subcocycle_lim_ereal u x) (subcocycle_lim_ereal v x)"
proof -
have "AE x in M. (λn. max (u n x) (v n x) / n) ⇢ subcocycle_lim_ereal (λn x. max (u n x) (v n x)) x"
by (rule kingman_theorem_AE_nonergodic_ereal[OF subcocycle_max[OF assms]])
moreover have "AE x in M. (λn. u n x / n) ⇢ subcocycle_lim_ereal u x"
by (rule kingman_theorem_AE_nonergodic_ereal[OF assms(1)])
moreover have "AE x in M. (λn. v n x / n) ⇢ subcocycle_lim_ereal v x"
by (rule kingman_theorem_AE_nonergodic_ereal[OF assms(2)])
moreover
{
fix x assume H: "(λn. max (u n x) (v n x) / n) ⇢ subcocycle_lim_ereal (λn x. max (u n x) (v n x)) x"
"(λn. u n x / n) ⇢ subcocycle_lim_ereal u x"
"(λn. v n x / n) ⇢ subcocycle_lim_ereal v x"
have "(λn. max (ereal(u n x / n)) (ereal(v n x / n)))
⇢ max (subcocycle_lim_ereal u x) (subcocycle_lim_ereal v x)"
apply (rule tendsto_max) using H by auto
(* Division by n > 0 commutes with max, so the two sequences coincide. *)
moreover have "max (ereal(u n x / n)) (ereal(v n x / n)) = max (u n x) (v n x) / n" for n
by (simp del: ereal_max add:ereal_max[symmetric] max_divide_distrib_right)
ultimately have "(λn. max (u n x) (v n x) / n)
⇢ max (subcocycle_lim_ereal u x) (subcocycle_lim_ereal v x)"
by auto
then have "subcocycle_lim_ereal (λn x. max (u n x) (v n x)) x
= max (subcocycle_lim_ereal u x) (subcocycle_lim_ereal v x)"
using H(1) by (simp add: LIMSEQ_unique)
}
ultimately show ?thesis by auto
qed
(* For the subcocycle of Birkhoff sums of an integrable u, the subcocycle
   limit coincides a.e. with the Birkhoff limit, i.e., the conditional
   expectation of u w.r.t. the invariant sigma-algebra. *)
lemma subcocycle_lim_ereal_birkhoff:
assumes "integrable M u"
shows "AE x in M. subcocycle_lim_ereal (birkhoff_sum u) x = ereal(real_cond_exp M Invariants u x)"
proof -
have "AE x in M. (λn. birkhoff_sum u n x / n) ⇢ real_cond_exp M Invariants u x"
by (rule birkhoff_theorem_AE_nonergodic[OF assms])
moreover have "AE x in M. (λn. birkhoff_sum u n x / n) ⇢ subcocycle_lim_ereal (birkhoff_sum u) x"
by (rule kingman_theorem_AE_nonergodic_ereal[OF subcocycle_birkhoff[OF assms]])
moreover
{
(* Same sequence, two limits (real and ereal): uniqueness identifies them. *)
fix x assume H: "(λn. birkhoff_sum u n x / n) ⇢ real_cond_exp M Invariants u x"
"(λn. birkhoff_sum u n x / n) ⇢ subcocycle_lim_ereal (birkhoff_sum u) x"
have "(λn. birkhoff_sum u n x / n) ⇢ ereal(real_cond_exp M Invariants u x)"
using H(1) by auto
then have "subcocycle_lim_ereal (birkhoff_sum u) x = ereal(real_cond_exp M Invariants u x)"
using H(2) by (simp add: LIMSEQ_unique)
}
ultimately show ?thesis by auto
qed
subsection ‹$L^1$ and a.e.\ convergence of subcocycles with finite asymptotic average›
text ‹In this subsection, we show that the almost sure convergence in Kingman theorem
also takes place in $L^1$ if the limit is integrable, i.e., if the asymptotic average
of the subcocycle is $> -\infty$. To deduce it from the almost sure convergence, we only need
to show that there is no loss of mass, i.e., that the integral of the limit is not
strictly larger than the limit of the integrals (thanks to Scheffe criterion). This follows
from comparison to Birkhoff sums, for which we know that the average of the limit is
the same as the average of the function.›
text ‹First, we show that the subcocycle limit is bounded by the limit of the Birkhoff sums of
$u_N$, i.e., its conditional expectation. This follows from the fact that $u_n$ is bounded by the
Birkhoff sum of $u_N$ (up to negligible boundary terms).›
(* The subcocycle limit is bounded above (a.e.) by the conditional expectation
   of u N / N, for any fixed N > 0: u n is dominated by a Birkhoff sum of
   u N / N plus boundary terms that are o(n), and the Birkhoff averages of
   u N / N converge to the conditional expectation. *)
lemma subcocycle_lim_ereal_atmost_uN_invariants:
assumes "subcocycle u" "N>(0::nat)"
shows "AE x in M. subcocycle_lim_ereal u x ≤ real_cond_exp M Invariants (λx. u N x / N) x"
proof -
(* The boundary terms involve u 1 along the orbit; they vanish after division
   by n thanks to limit_foTn_over_n'. *)
have "AE x in M. (λn. u 1 ((T^^n) x) / n) ⇢ 0"
apply (rule limit_foTn_over_n') using assms(1) unfolding subcocycle_def by auto
moreover have "AE x in M. (λn. birkhoff_sum (λx. u N x/N) n x / n) ⇢ real_cond_exp M Invariants (λx. u N x / N) x"
apply (rule birkhoff_theorem_AE_nonergodic) using assms(1) unfolding subcocycle_def by auto
moreover have "AE x in M. (λn. u n x / n) ⇢ subcocycle_lim_ereal u x"
by (rule kingman_theorem_AE_nonergodic_ereal[OF assms(1)])
moreover
{
fix x assume H: "(λn. u 1 ((T^^n) x) / n) ⇢ 0"
"(λn. birkhoff_sum (λx. u N x/N) n x / n) ⇢ real_cond_exp M Invariants (λx. u N x / N) x"
"(λn. u n x / n) ⇢ subcocycle_lim_ereal u x"
(* ?f n is the explicit upper bound for u n x / n: the Birkhoff average of
   u N / N over n - 2N steps, plus two boundary terms divided by n. *)
let ?f = "λn. birkhoff_sum (λx. u N x / real N) (n - 2 * N) x / n
+ (∑i<N. (1/n) * ¦u 1 ((T ^^ i) x)¦)
+ 2 * (∑i<2*N. ¦u 1 ((T ^^ (n - (2 * N - i))) x)¦ / n)"
{
fix n assume "n≥2*N+1"
then have "n > 2 * N" by simp
(* The key domination, valid for n > 2N, from subcocycle_bounded_by_birkhoffN. *)
have "u n x / n ≤ (birkhoff_sum (λx. u N x / real N) (n - 2 * N) x
+ (∑i<N. ¦u 1 ((T ^^ i) x)¦)
+ 2 * (∑i<2*N. ¦u 1 ((T ^^ (n - (2 * N - i))) x)¦)) / n"
using subcocycle_bounded_by_birkhoffN[OF assms(1) ‹n>2*N› ‹N>0›, of x] ‹n>2*N› by (simp add: divide_right_mono)
also have "... = ?f n"
apply (subst add_divide_distrib)+ by (auto simp add: sum_divide_distrib[symmetric])
finally have "u n x / n ≤ ?f n" by simp
then have "u n x / n ≤ ereal(?f n)" by simp
}
(* The upper bound converges to the conditional expectation: the boundary
   terms go to 0 as n → ∞. *)
have "(λn. ?f n) ⇢ real_cond_exp M Invariants (λx. u N x / N) x + (∑i<N. 0 * ¦u 1 ((T ^^ i) x)¦) + 2 * (∑i<2*N. 0)"
apply (intro tendsto_intros) using H(2) tendsto_norm[OF H(1)] by auto
then have "(λn. ereal(?f n)) ⇢ real_cond_exp M Invariants (λx. u N x / N) x"
by auto
(* Pass to the limit in the eventual inequality. *)
with lim_mono[OF ‹⋀n. n ≥ 2*N+1 ⟹ u n x / n ≤ ereal(?f n)› H(3) this]
have "subcocycle_lim_ereal u x ≤ real_cond_exp M Invariants (λx. u N x / N) x" by simp
}
ultimately show ?thesis by auto
qed
text ‹To apply Scheffe criterion, we need to deal with nonnegative functions, or equivalently
with nonpositive functions after a change of sign. Hence, as in the proof of the almost
sure version of Kingman theorem above, we first give the proof assuming that the
subcocycle is nonpositive, and deduce the general statement by adding a suitable
Birkhoff sum.›
(* L^1 version of Kingman's theorem for nonpositive subcocycles with finite
   asymptotic average. Strategy: a.e. convergence is already known; the
   functions -u n / n are nonnegative, so Scheffe's criterion upgrades a.e.
   convergence to L^1 convergence provided there is no loss of mass, i.e.,
   limsup of the integrals is at most the integral of the limit. That bound
   comes from comparing the limit with conditional expectations
   (subcocycle_lim_ereal_atmost_uN_invariants). *)
lemma kingman_theorem_L1_aux:
assumes "subcocycle u" "subcocycle_avg_ereal u > -∞" "⋀x. u 1 x ≤ 0"
shows "AE x in M. (λn. u n x / n) ⇢ subcocycle_lim u x"
"integrable M (subcocycle_lim u)"
"(λn. (∫⇧+x. abs(u n x / n - subcocycle_lim u x) ∂M)) ⇢ 0"
proof -
have int_u [measurable]: "⋀n. integrable M (u n)" using assms(1) subcocycle_def by auto
then have int_F [measurable]: "⋀n. integrable M (λx. - u n x/ n)" by auto
(* Nonpositivity of u 1 propagates to all u n via the Birkhoff-sum bound,
   so -u n x / n ≥ 0 everywhere. *)
have F_pos: "- u n x / n ≥ 0" for x n
proof (cases "n > 0")
case True
have "u n x ≤ (∑i<n. u 1 ((T ^^ i) x))"
using subcocycle_bounded_by_birkhoff1[OF assms(1) ‹n>0›] unfolding birkhoff_sum_def by simp
also have "... ≤ 0" using sum_mono[OF assms(3)] by auto
finally have "u n x ≤ 0" by simp
then have "-u n x ≥ 0" by simp
with divide_nonneg_nonneg[OF this] show "- u n x / n ≥ 0" using ‹n>0› by auto
qed (auto)
{
(* Pointwise consequences of the a.e. convergence of u n x / n. *)
fix x assume *: "(λn. u n x / n) ⇢ subcocycle_lim_ereal u x"
have H: "(λn. - u n x / n) ⇢ - subcocycle_lim_ereal u x"
using tendsto_cmult_ereal[OF _ *, of "-1"] by auto
have "liminf (λn. -u n x / n) = - subcocycle_lim_ereal u x"
"(λn. - u n x / n) ⇢ - subcocycle_lim_ereal u x"
"- subcocycle_lim_ereal u x ≥ 0"
using H apply (simp add: tendsto_iff_Liminf_eq_Limsup, simp)
apply (rule LIMSEQ_le_const[OF H]) using F_pos by auto
}
then have AE_1: "AE x in M. liminf (λn. -u n x / n) = - subcocycle_lim_ereal u x"
"AE x in M. (λn. - u n x / n) ⇢ - subcocycle_lim_ereal u x"
"AE x in M. - subcocycle_lim_ereal u x ≥ 0"
using kingman_theorem_AE_nonergodic_ereal[OF assms(1)] by auto
(* Fatou's lemma (nn_integral_liminf) bounds the integral of the nonnegative
   limit by the liminf of the integrals, which equals -subcocycle_avg_ereal u. *)
have "(∫⇧+ x. -subcocycle_lim_ereal u x ∂M) = (∫⇧+ x. liminf (λn. -u n x / n) ∂M)"
apply (rule nn_integral_cong_AE) using AE_1(1) by auto
also have "... ≤ liminf (λn. ∫⇧+ x. -u n x / n ∂M)"
apply (subst e2ennreal_Liminf)
apply (simp_all add: e2ennreal_ereal)
using F_pos by (intro nn_integral_liminf) (simp add: int_F)
also have "... = - subcocycle_avg_ereal u"
proof -
have "(λn. (∫x. u n x / n ∂M)) ⇢ subcocycle_avg_ereal u"
by (rule subcocycle_int_tendsto_avg_ereal[OF assms(1)])
with tendsto_cmult_ereal[OF _ this, of "-1"]
have "(λn. (∫x. - u n x / n ∂M)) ⇢ - subcocycle_avg_ereal u" by simp
then have "- subcocycle_avg_ereal u = liminf (λn. (∫x. - u n x / n ∂M))"
by (simp add: tendsto_iff_Liminf_eq_Limsup)
moreover have "(∫⇧+ x. ennreal (-u n x / n) ∂M) = ennreal(∫x. - u n x / n ∂M)" for n
apply (rule nn_integral_eq_integral[OF int_F]) using F_pos by auto
ultimately show ?thesis
by (auto simp: e2ennreal_Liminf e2ennreal_ereal)
qed
finally have "(∫⇧+ x. -subcocycle_lim_ereal u x ∂M) ≤ - subcocycle_avg_ereal u" by simp
(* Finiteness of the asymptotic average gives finiteness of the integral,
   hence a.e. finiteness of -subcocycle_lim_ereal u. *)
also have "… < ∞" using assms(2)
by (cases "subcocycle_avg_ereal u") (auto simp: e2ennreal_ereal e2ennreal_neg)
finally have *: "(∫⇧+ x. -subcocycle_lim_ereal u x ∂M) < ∞" .
have "AE x in M. e2ennreal (- subcocycle_lim_ereal u x) ≠ ∞"
apply (rule nn_integral_PInf_AE) using * by auto
then have **: "AE x in M. - subcocycle_lim_ereal u x ≠ ∞"
using AE_1(3) by eventually_elim simp
{
(* On the a.e. set where the ereal limit is finite, convergence transfers to
   the real-valued subcocycle_lim u. *)
fix x assume H: "- subcocycle_lim_ereal u x ≠ ∞"
"(λn. u n x / n) ⇢ subcocycle_lim_ereal u x"
"- subcocycle_lim_ereal u x ≥ 0"
then have 1: "abs(subcocycle_lim_ereal u x) ≠ ∞" by auto
then have 2: "(λn. u n x / n) ⇢ subcocycle_lim u x" using H(2) unfolding subcocycle_lim_def by auto
then have 3: "(λn. - (u n x / n)) ⇢ - subcocycle_lim u x" using tendsto_mult[OF _ 2, of "λ_. -1", of "-1"] by auto
have 4: "-subcocycle_lim u x ≥ 0" using H(3) unfolding subcocycle_lim_def by auto
have "abs(subcocycle_lim_ereal u x) ≠ ∞"
"(λn. u n x / n) ⇢ subcocycle_lim u x"
"(λn. - (u n x / n)) ⇢ - subcocycle_lim u x"
"-subcocycle_lim u x ≥ 0"
using 1 2 3 4 by auto
}
then have AE_2: "AE x in M. abs(subcocycle_lim_ereal u x) ≠ ∞"
"AE x in M. (λn. u n x / n) ⇢ subcocycle_lim u x"
"AE x in M. (λn. - (u n x / n)) ⇢ - subcocycle_lim u x"
"AE x in M. -subcocycle_lim u x ≥ 0"
using kingman_theorem_AE_nonergodic_ereal[OF assms(1)] ** AE_1(3) by auto
then show "AE x in M. (λn. u n x / n) ⇢ subcocycle_lim u x" by simp
(* Integrability of the limit follows from the finite nn_integral above. *)
have "(∫⇧+x. abs(subcocycle_lim u x) ∂M) = (∫⇧+x. -subcocycle_lim_ereal u x ∂M)"
apply (rule nn_integral_cong_AE)
using AE_2 unfolding subcocycle_lim_def abs_real_of_ereal
apply eventually_elim
by (auto simp: e2ennreal_ereal)
then have A: "(∫⇧+x. abs(subcocycle_lim u x) ∂M) < ∞" using * by auto
show int_Gr: "integrable M (subcocycle_lim u)"
apply (rule integrableI_bounded) using A by auto
(* Scheffe's criterion: a.e. convergence of nonnegative functions plus
   limsup of integrals ≤ integral of the limit gives L^1 convergence. *)
have B: "(λn. (∫⇧+ x. norm((- u n x /n) - (-subcocycle_lim u x)) ∂M)) ⇢ 0"
proof (rule Scheffe_lemma1, auto simp add: int_Gr int_u AE_2(2) AE_2(3))
{
fix n assume "n>(0::nat)"
(* The integral of the limit is below the integral of u n / n, by the
   comparison with conditional expectations; hence the required limsup bound
   after taking absolute values. *)
have *: "AE x in M. subcocycle_lim u x ≤ real_cond_exp M Invariants (λx. u n x / n) x"
using subcocycle_lim_ereal_atmost_uN_invariants[OF assms(1) ‹n>0›] AE_2(1)
unfolding subcocycle_lim_def by auto
have "(∫x. subcocycle_lim u x ∂M) ≤ (∫x. real_cond_exp M Invariants (λx. u n x / n) x ∂M)"
apply (rule integral_mono_AE[OF int_Gr _ *], rule real_cond_exp_int(1)) using int_u by auto
also have "... = (∫x. u n x / n ∂M)" apply (rule real_cond_exp_int(2)) using int_u by auto
finally have A: "(∫x. subcocycle_lim u x ∂M) ≤ (∫x. u n x / n ∂M)" by auto
have "(∫⇧+x. abs(u n x) / n ∂M) = (∫⇧+x. - u n x / n ∂M)"
apply (rule nn_integral_cong) using F_pos abs_of_nonneg by (intro arg_cong[where f = ennreal]) fastforce
also have "... = (∫x. - u n x / n ∂M)"
apply (rule nn_integral_eq_integral) using F_pos int_F by auto
also have "... ≤ (∫x. - subcocycle_lim u x ∂M)" using A by (auto intro!: ennreal_leI)
also have "... = (∫⇧+x. - subcocycle_lim u x ∂M)"
apply (rule nn_integral_eq_integral[symmetric]) using int_Gr AE_2(4) by auto
also have "... = (∫⇧+x. abs(subcocycle_lim u x) ∂M)"
apply (rule nn_integral_cong_AE) using AE_2(4) by auto
finally have "(∫⇧+x. abs(u n x) / n ∂M) ≤ (∫⇧+x. abs(subcocycle_lim u x) ∂M)" by simp
}
with eventually_mono[OF eventually_gt_at_top[of 0] this]
have "eventually (λn. (∫⇧+x. abs(u n x) / n ∂M) ≤ (∫⇧+x. abs(subcocycle_lim u x) ∂M)) sequentially"
by fastforce
then show "limsup (λn. ∫⇧+ x. abs(u n x) / n ∂M) ≤ ∫⇧+ x. abs(subcocycle_lim u x) ∂M"
using Limsup_bounded by fastforce
qed
(* Rewrite the Scheffe conclusion in the form stated in the lemma. *)
moreover have "norm((- u n x /n) - (-subcocycle_lim u x)) = abs(u n x / n - subcocycle_lim u x)"
for n x by auto
ultimately show "(λn. ∫⇧+ x. ennreal ¦u n x / real n - subcocycle_lim u x¦ ∂M) ⇢ 0"
by auto
qed
text ‹We can then remove the nonpositivity assumption, by subtracting the Birkhoff sums of $u_1$
to a general subcocycle $u$.›
theorem kingman_theorem_nonergodic:
assumes "subcocycle u" "subcocycle_avg_ereal u > -∞"
shows "AE x in M. (λn. u n x / n) ⇢ subcocycle_lim u x"
"integrable M (subcocycle_lim u)"
"(λn. (∫⇧+x. abs(u n x / n - subcocycle_lim u x) ∂M)) ⇢ 0"
proof -
have [measurable]: "u n ∈ borel_measurable M" for n using assms(1) unfolding subcocycle_def by auto
have int_u [measurable]: "integrable M (u 1)" using assms(1) subcocycle_def by auto
define v where "v = (λn x. u n x + birkhoff_sum (λx. - u 1 x) n x)"
have [measurable]: "v n ∈ borel_measurable M" for n unfolding v_def by auto
define w where "w = birkhoff_sum (u 1)"
have [measurable]: "w n ∈ borel_measurable M" for n unfolding w_def by auto
have "subcocycle v" unfolding v_def
apply (rule subcocycle_add[OF assms(1)], rule subcocycle_birkhoff)
using assms unfolding subcocycle_def by auto
have "subcocycle w" unfolding w_def by (rule subcocycle_birkhoff[OF int_u])
have uvw: "u n x = v n x + w n x" for n x
unfolding v_def w_def birkhoff_sum_def by (auto simp add: sum_negf)
then have "subcocycle_avg_ereal (λn x. u n x) = subcocycle_avg_ereal v + subcocycle_avg_ereal w"
using subcocycle_avg_ereal_add[OF ‹subcocycle v› ‹subcocycle w›] by auto
then have "subcocycle_avg_ereal u = subcocycle_avg_ereal v + subcocycle_avg_ereal w"
by auto
then have "subcocycle_avg_ereal v > -∞"
unfolding w_def using subcocycle_avg_ereal_birkhoff[OF int_u] assms(2) by auto
have "subcocycle_avg_ereal w > - ∞"
unfolding w_def using subcocycle_avg_birkhoff[OF int_u] by auto
have "⋀x. v 1 x ≤ 0" unfolding v_def by auto
have v: "AE x in M. (λn. v n x / n) ⇢ subcocycle_lim v x"
"integrable M (subcocycle_lim v)"
"(λn. (∫⇧+x. abs(v n x / n - subcocycle_lim v x) ∂M)) ⇢ 0"
using kingman_theorem_L1_aux[OF ‹subcocycle v› ‹subcocycle_avg_ereal v > -∞› ‹⋀x. v 1 x ≤ 0›] by auto
have w: "AE x in M. (λn. w n x / n) ⇢ subcocycle_lim w x"
"integrable M (subcocycle_lim w)"
"(λn. (∫⇧+x. abs(w n x / n - subcocycle_lim w x) ∂M)) ⇢ 0"
proof -
show "AE x in M. (λn. w n x / n) ⇢ subcocycle_lim w x"
unfolding w_def subcocycle_lim_def using subcocycle_lim_ereal_birkhoff[OF int_u]
birkhoff_theorem_AE_nonergodic[OF int_u] by auto
show "integrable M (subcocycle_lim w)"
apply (subst integrable_cong_AE[where ?g = "λx. real_cond_exp M Invariants (u 1) x"])
unfolding w_def subcocycle_lim_def
using subcocycle_lim_ereal_birkhoff[OF int_u] real_cond_exp_int(1)[OF int_u] by auto
have "(∫⇧+x. abs(w n x / n - subcocycle_lim w x) ∂M)
= (∫⇧+x. abs(birkhoff_sum (u 1) n x / n - real_cond_exp M Invariants (u 1) x) ∂M)" for n
apply (rule nn_integral_cong_AE)
unfolding w_def subcocycle_lim_def using subcocycle_lim_ereal_birkhoff[OF int_u] by auto
then show "(λn. (∫⇧+x. abs(w n x / n - subcocycle_lim w x) ∂M)) ⇢ 0"
using birkhoff_theorem_L1_nonergodic[OF int_u] by auto
qed
{
fix x assume H: "(λn. v n x / n) ⇢ subcocycle_lim v x"
"(λn. w n x / n) ⇢ subcocycle_lim w x"
"(λn. u n x / n) ⇢ subcocycle_lim_ereal u x"
then have "(λn. v n x / n + w n x / n) ⇢ subcocycle_lim v x + subcocycle_lim w x"
using tendsto_add[OF H(1) H(2)] by simp
then have *: "(λn. ereal(u n x / n)) ⇢ ereal(subcocycle_lim v x + subcocycle_lim w x)"
unfolding uvw by (simp add: add_divide_distrib)
then have "subcocycle_lim_ereal u x = ereal(subcocycle_lim v x + subcocycle_lim w x)"
using H(3) LIMSEQ_unique by blast
then have **: "subcocycle_lim u x = subcocycle_lim v x + subcocycle_lim w x"
using subcocycle_lim_def by auto
have "u n x / n - subcocycle_lim u x = v n x / n - subcocycle_lim v x + w n x / n - subcocycle_lim w x" for n
apply (subst **, subst uvw) using add_divide_distrib add.commute by auto
then have "(λn. u n x / n) ⇢ subcocycle_lim u x
∧ subcocycle_lim u x = subcocycle_lim v x + subcocycle_lim w x
∧ (∀n. u n x / n - subcocycle_lim u x = v n x / n - subcocycle_lim v x + w n x / n - subcocycle_lim w x)"
using * ** by auto
}
then have AE: "AE x in M. (λn. u n x / n) ⇢ subcocycle_lim u x"
"AE x in M. subcocycle_lim u x = subcocycle_lim v x + subcocycle_lim w x"
"AE x in M. ∀n. u n x / n - subcocycle_lim u x = v n x / n - subcocycle_lim v x + w n x / n - subcocycle_lim w x"
using v(1) w(1) kingman_theorem_AE_nonergodic_ereal[OF assms(1)] by auto
then show "AE x in M. (λn. u n x / n) ⇢ subcocycle_lim u x" by simp
show "integrable M (subcocycle_lim u)"
apply (subst integrable_cong_AE[where ?g = "λx. subcocycle_lim v x + subcocycle_lim w x"])
by (auto simp add: AE(2) v(2) w(2))
show "(λn. (∫⇧+x. abs(u n x / n - subcocycle_lim u x) ∂M)) ⇢ 0"
proof (rule tendsto_sandwich[where ?f = "λ_. 0"
and ?h = "λn. (∫⇧+x. abs(v n x / n - subcocycle_lim v x) ∂M) + (∫⇧+x. abs(w n x / n - subcocycle_lim w x) ∂M)"], auto)
{
fix n
have "(∫⇧+x. abs(u n x / n - subcocycle_lim u x) ∂M)
= (∫⇧+x. abs((v n x / n - subcocycle_lim v x) + (w n x / n - subcocycle_lim w x)) ∂M)"
apply (rule nn_integral_cong_AE) using AE(3) by auto
also have "... ≤ (∫⇧+x. ennreal(abs(v n x / n - subcocycle_lim v x)) + abs(w n x / n - subcocycle_lim w x) ∂M)"
by (rule nn_integral_mono, auto simp add: ennreal_plus[symmetric] simp del: ennreal_plus)
also have "... = (∫⇧+x. abs(v n x / n - subcocycle_lim v x) ∂M) + (∫⇧+x. abs(w n x / n - subcocycle_lim w x) ∂M)"
by (rule nn_integral_add, auto, measurable)
finally have "(∫⇧+x. abs(u n x / n - subcocycle_lim u x) ∂M)
≤ (∫⇧+x. abs(v n x / n - subcocycle_lim v x) ∂M) + (∫⇧+x. abs(w n x / n - subcocycle_lim w x) ∂M)"
using tendsto_sandwich by simp
}
then show "eventually (λn. (∫⇧+x. abs(u n x / n - subcocycle_lim u x) ∂M)
≤ (∫⇧+x. abs(v n x / n - subcocycle_lim v x) ∂M) + (∫⇧+x. abs(w n x / n - subcocycle_lim w x) ∂M)) sequentially"
by auto
have "(λn. (∫⇧+x. abs(v n x / n - subcocycle_lim v x) ∂M) + (∫⇧+x. abs(w n x / n - subcocycle_lim w x) ∂M))
⇢ 0 + 0"
by (rule tendsto_add[OF v(3) w(3)])
then show "(λn. (∫⇧+x. abs(v n x / n - subcocycle_lim v x) ∂M) + (∫⇧+x. abs(w n x / n - subcocycle_lim w x) ∂M))
⇢ 0"
by simp
qed
qed
text ‹From the almost sure convergence, we can prove the basic properties of the (real)
subcocycle limit: relationship to the asymptotic average, behavior under sum, multiplication,
max, behavior for Birkhoff sums.›
(* The integral of the Kingman limit function equals the (real) asymptotic average
   of the subcocycle, when the asymptotic average is finite.
   Proof idea: the L1 convergence of u n / n to subcocycle_lim u given by Kingman's
   theorem yields convergence of the integrals; the same integrals also converge to
   the asymptotic average, so the two limits coincide by uniqueness. *)
lemma subcocycle_lim_avg:
assumes "subcocycle u" "subcocycle_avg_ereal u > -∞"
shows "(∫x. subcocycle_lim u x ∂M) = subcocycle_avg u"
proof -
(* L1 convergence and integrability of the limit, from Kingman's theorem. *)
have H: "(λn. (∫⇧+x. norm(u n x / n - subcocycle_lim u x) ∂M)) ⇢ 0"
"integrable M (subcocycle_lim u)"
using kingman_theorem_nonergodic[OF assms] by auto
(* L1 convergence implies convergence of the Bochner integrals. *)
have "(λn. (∫x. u n x / n ∂M)) ⇢ (∫x. subcocycle_lim u x ∂M)"
apply (rule tendsto_L1_int[OF _ H(2) H(1)]) using subcocycle_integrable[OF assms(1)] by auto
then have "(λn. (∫x. u n x / n ∂M)) ⇢ ereal (∫x. subcocycle_lim u x ∂M)" by auto
(* The same integral sequence converges to the asymptotic average. *)
moreover have "(λn. (∫x. u n x / n ∂M)) ⇢ ereal (subcocycle_avg u)"
using subcocycle_int_tendsto_avg[OF assms] by auto
ultimately show ?thesis using LIMSEQ_unique by blast
qed
(* When the asymptotic average is finite, the extended-real Kingman limit agrees
   almost everywhere with the real one: both are a.e. limits of the same sequence
   u n x / n, so uniqueness of limits identifies them pointwise. *)
lemma subcocycle_lim_real_ereal:
assumes "subcocycle u" "subcocycle_avg_ereal u > -∞"
shows "AE x in M. subcocycle_lim_ereal u x = ereal(subcocycle_lim u x)"
proof -
(* Pointwise argument, assuming both a.e. convergences hold at x. *)
{
fix x assume H: "(λn. u n x / n) ⇢ subcocycle_lim_ereal u x"
"(λn. u n x / n) ⇢ subcocycle_lim u x"
then have "(λn. u n x / n) ⇢ ereal(subcocycle_lim u x)" by auto
then have "subcocycle_lim_ereal u x = ereal(subcocycle_lim u x)"
using H(1) LIMSEQ_unique by blast
}
(* Both convergence hypotheses hold almost everywhere by the two Kingman theorems. *)
then show ?thesis
using kingman_theorem_AE_nonergodic_ereal[OF assms(1)] kingman_theorem_nonergodic(1)[OF assms] by auto
qed
(* The Kingman limit of the sum of two subcocycles (with finite averages) is
   a.e. the sum of their limits; moreover the sum has a finite asymptotic average.
   Proof: combine the a.e. convergences of the three quotient sequences and use
   uniqueness of limits pointwise. *)
lemma subcocycle_lim_add:
assumes "subcocycle u" "subcocycle v" "subcocycle_avg_ereal u > -∞" "subcocycle_avg_ereal v > -∞"
shows "subcocycle_avg_ereal (λn x. u n x + v n x) > - ∞"
"AE x in M. subcocycle_lim (λn x. u n x + v n x) x = subcocycle_lim u x + subcocycle_lim v x"
proof -
show *: "subcocycle_avg_ereal (λn x. u n x + v n x) > - ∞"
using subcocycle_avg_add[OF assms(1) assms(2)] assms(3) assms(4) by auto
(* Almost-everywhere convergences for the sum and for the two summands. *)
have "AE x in M. (λn. (u n x + v n x)/n) ⇢ subcocycle_lim (λn x. u n x + v n x) x"
by (rule kingman_theorem_nonergodic(1)[OF subcocycle_add[OF assms(1) assms(2)] *])
moreover have "AE x in M. (λn. u n x / n) ⇢ subcocycle_lim u x"
by (rule kingman_theorem_nonergodic[OF assms(1) assms(3)])
moreover have "AE x in M. (λn. v n x / n) ⇢ subcocycle_lim v x"
by (rule kingman_theorem_nonergodic[OF assms(2) assms(4)])
moreover
(* Pointwise: the sum of the quotients converges to the sum of the limits,
   so uniqueness of limits gives the identity at x. *)
{
fix x assume H: "(λn. (u n x + v n x)/n) ⇢ subcocycle_lim (λn x. u n x + v n x) x"
"(λn. u n x / n) ⇢ subcocycle_lim u x"
"(λn. v n x / n) ⇢ subcocycle_lim v x"
have *: "(u n x + v n x)/n = (u n x / n) + (v n x / n)" for n
by (simp add: add_divide_distrib)
have "(λn. (u n x + v n x)/n) ⇢ subcocycle_lim u x + subcocycle_lim v x"
unfolding * by (intro tendsto_intros H)
then have "subcocycle_lim (λn x. u n x + v n x) x = subcocycle_lim u x + subcocycle_lim v x"
using H(1) by (simp add: LIMSEQ_unique)
}
ultimately show "AE x in M. subcocycle_lim (λn x. u n x + v n x) x
= subcocycle_lim u x + subcocycle_lim v x"
by auto
qed
(* The Kingman limit commutes with multiplication by a nonnegative constant c
   (nonnegativity is needed so that c * u is still a subcocycle).
   Same pointwise uniqueness-of-limits scheme as subcocycle_lim_add. *)
lemma subcocycle_lim_cmult:
assumes "subcocycle u" "subcocycle_avg_ereal u > -∞" "c≥(0::real)"
shows "subcocycle_avg_ereal (λn x. c * u n x) > - ∞"
"AE x in M. subcocycle_lim (λn x. c * u n x) x = c * subcocycle_lim u x"
proof -
show *: "subcocycle_avg_ereal (λn x. c * u n x) > - ∞"
using subcocycle_avg_cmult[OF assms(1) assms(3)] assms(2) assms(3) by auto
(* A.e. convergence for the scaled subcocycle and for the original one. *)
have "AE x in M. (λn. (c * u n x)/n) ⇢ subcocycle_lim (λn x. c * u n x) x"
by (rule kingman_theorem_nonergodic(1)[OF subcocycle_cmult[OF assms(1) assms(3)] *])
moreover have "AE x in M. (λn. u n x / n) ⇢ subcocycle_lim u x"
by (rule kingman_theorem_nonergodic(1)[OF assms(1) assms(2)])
moreover
(* Pointwise identification of the two limits. *)
{
fix x assume H: "(λn. (c * u n x)/n) ⇢ subcocycle_lim (λn x. c * u n x) x"
"(λn. u n x / n) ⇢ subcocycle_lim u x"
have "(λn. c * (u n x / n)) ⇢ c * subcocycle_lim u x"
by (rule tendsto_mult[OF _ H(2)], auto)
then have "subcocycle_lim (λn x. c * u n x) x = c * subcocycle_lim u x"
using H(1) by (simp add: LIMSEQ_unique)
}
ultimately show "AE x in M. subcocycle_lim (λn x. c * u n x) x = c * subcocycle_lim u x" by auto
qed
(* The Kingman limit of the pointwise maximum of two subcocycles (with finite
   averages) is a.e. the maximum of their limits.
   Same pointwise uniqueness-of-limits scheme as subcocycle_lim_add. *)
lemma subcocycle_lim_max:
assumes "subcocycle u" "subcocycle v" "subcocycle_avg_ereal u > -∞" "subcocycle_avg_ereal v > -∞"
shows "subcocycle_avg_ereal (λn x. max (u n x) (v n x)) > - ∞"
"AE x in M. subcocycle_lim (λn x. max (u n x) (v n x)) x = max (subcocycle_lim u x) (subcocycle_lim v x)"
proof -
show *: "subcocycle_avg_ereal (λn x. max (u n x) (v n x)) > - ∞"
using subcocycle_avg_max(1)[OF assms(1) assms(2)] assms(3) assms(4) by auto
(* A.e. convergences for the max and for the two original subcocycles. *)
have "AE x in M. (λn. max (u n x) (v n x) / n) ⇢ subcocycle_lim (λn x. max (u n x) (v n x)) x"
by (rule kingman_theorem_nonergodic[OF subcocycle_max[OF assms(1) assms(2)] *])
moreover have "AE x in M. (λn. u n x / n) ⇢ subcocycle_lim u x"
by (rule kingman_theorem_nonergodic[OF assms(1) assms(3)])
moreover have "AE x in M. (λn. v n x / n) ⇢ subcocycle_lim v x"
by (rule kingman_theorem_nonergodic[OF assms(2) assms(4)])
moreover
(* Pointwise: max of quotients converges to max of limits (n > 0 lets max
   distribute over the division), then uniqueness of limits. *)
{
fix x assume H: "(λn. max (u n x) (v n x) / n) ⇢ subcocycle_lim (λn x. max (u n x) (v n x)) x"
"(λn. u n x / n) ⇢ subcocycle_lim u x"
"(λn. v n x / n) ⇢ subcocycle_lim v x"
have "(λn. max (u n x / n) (v n x / n)) ⇢ max (subcocycle_lim u x) (subcocycle_lim v x)"
apply (rule tendsto_max) using H by auto
moreover have "max (u n x / n) (v n x / n) = max (u n x) (v n x) / n" for n
by (simp add: max_divide_distrib_right)
ultimately have "(λn. max (u n x) (v n x) / n) ⇢ max (subcocycle_lim u x) (subcocycle_lim v x)"
by auto
then have "subcocycle_lim (λn x. max (u n x) (v n x)) x = max (subcocycle_lim u x) (subcocycle_lim v x)"
using H(1) by (simp add: LIMSEQ_unique)
}
ultimately show "AE x in M. subcocycle_lim (λn x. max (u n x) (v n x)) x
= max (subcocycle_lim u x) (subcocycle_lim v x)" by auto
qed
(* For the (additive) subcocycle given by Birkhoff sums of an integrable u, the
   Kingman limit is a.e. the conditional expectation of u on the invariant
   sigma-algebra: Birkhoff's theorem and Kingman's theorem give two a.e. limits
   of the same sequence, identified by uniqueness. *)
lemma subcocycle_lim_birkhoff:
assumes "integrable M u"
shows "subcocycle_avg_ereal (birkhoff_sum u) > -∞"
"AE x in M. subcocycle_lim (birkhoff_sum u) x = real_cond_exp M Invariants u x"
proof -
show *: "subcocycle_avg_ereal (birkhoff_sum u) > -∞"
using subcocycle_avg_birkhoff[OF assms] by auto
(* Two a.e. descriptions of the limit of birkhoff_sum u n x / n. *)
have "AE x in M. (λn. birkhoff_sum u n x / n) ⇢ real_cond_exp M Invariants u x"
by (rule birkhoff_theorem_AE_nonergodic[OF assms])
moreover have "AE x in M. (λn. birkhoff_sum u n x / n) ⇢ subcocycle_lim (birkhoff_sum u) x"
by (rule kingman_theorem_nonergodic(1)[OF subcocycle_birkhoff[OF assms] *])
moreover
{
fix x assume H: "(λn. birkhoff_sum u n x / n) ⇢ real_cond_exp M Invariants u x"
"(λn. birkhoff_sum u n x / n) ⇢ subcocycle_lim (birkhoff_sum u) x"
then have "subcocycle_lim (birkhoff_sum u) x = real_cond_exp M Invariants u x"
using H(2) by (simp add: LIMSEQ_unique)
}
ultimately show "AE x in M. subcocycle_lim (birkhoff_sum u) x = real_cond_exp M Invariants u x" by auto
qed
subsection ‹Conditional expectations of subcocycles›
text ‹In this subsection, we show that the conditional expectations of a subcocycle
(with respect to the invariant subalgebra) also converge, with the same limit as the
original subcocycle.
Note that the conditional expectation of a subcocycle $u$ is still a subcocycle, with the
same average at each step so with the same asymptotic average. Kingman theorem can be applied to
it, and what we have to show is that the limit of this subcocycle is the same as the limit
of the original subcocycle.
When the asymptotic average is $>-\infty$, both limits have the same integral, and moreover
the domination of the subcocycle by the Birkhoff sums of $u_n$ for fixed $n$
(which converge to the conditional expectation of $u_n$) implies that one limit is smaller than
the other. Hence, they coincide almost everywhere.
The case when the asymptotic average is $-\infty$ is deduced from the previous one by truncation.
›
text ‹First, we prove the result when the asymptotic average is finite.›
(* Kingman's theorem for the conditional expectations of the subcocycle on the
   invariant sigma-algebra, in the finite-average case: the conditioned quotients
   converge a.e. and in L1 to the SAME limit function subcocycle_lim u.
   Outline:
   1. Build a genuine subcocycle w equal a.e. to the conditional expectations
      (the subadditivity inequality only holds a.e., so subcocycle_AE is used).
   2. Show w has the same integrals, hence the same asymptotic average, as u.
   3. Show subcocycle_lim u ≤ subcocycle_lim w a.e. (via the domination of the
      limit by the conditioned quotients at each fixed time N).
   4. Both limits have the same integral, so they coincide a.e.; transfer the
      a.e. and L1 convergences of w to the conditional expectations. *)
theorem kingman_theorem_nonergodic_invariant:
assumes "subcocycle u" "subcocycle_avg_ereal u > -∞"
shows "AE x in M. (λn. real_cond_exp M Invariants (u n) x / n) ⇢ subcocycle_lim u x"
"(λn. (∫⇧+x. abs(real_cond_exp M Invariants (u n) x / n - subcocycle_lim u x) ∂M)) ⇢ 0"
proof -
have int [simp]: "integrable M (u n)" for n using subcocycle_integrable[OF assms(1)] by auto
then have int2: "integrable M (real_cond_exp M Invariants (u n))" for n using real_cond_exp_int by auto
(* Step 1: the conditional expectations satisfy the subadditivity inequality
   a.e., using that Invariants-measurable functions are invariant under T^^n. *)
{
fix n m
have "u (n+m) x ≤ u n x + u m ((T^^n) x)" for x
using subcocycle_ineq[OF assms(1)] by auto
have "AE x in M. real_cond_exp M Invariants (u (n+m)) x ≤ real_cond_exp M Invariants (λx. u n x + u m ((T^^n) x)) x"
apply (rule real_cond_exp_mono)
using subcocycle_ineq[OF assms(1)] apply auto
by (rule Bochner_Integration.integrable_add, auto simp add: Tn_integral_preserving)
moreover have "AE x in M. real_cond_exp M Invariants (λx. u n x + u m ((T^^n) x)) x
= real_cond_exp M Invariants (u n) x + real_cond_exp M Invariants (λx. u m ((T^^n) x)) x"
by (rule real_cond_exp_add, auto simp add: Tn_integral_preserving)
moreover have "AE x in M. real_cond_exp M Invariants (u m ∘ ((T^^n))) x = real_cond_exp M Invariants (u m) x"
by (rule Invariants_of_foTn, simp)
moreover have "AE x in M. real_cond_exp M Invariants (u m) x = real_cond_exp M Invariants (u m) ((T^^n) x)"
using Invariants_func_is_invariant_n[symmetric, of "real_cond_exp M Invariants (u m)"] by auto
ultimately have "AE x in M. real_cond_exp M Invariants (u (n+m)) x
≤ real_cond_exp M Invariants (u n) x + real_cond_exp M Invariants (u m) ((T^^n) x)"
unfolding o_def by auto
}
(* Obtain a true subcocycle w agreeing a.e. with the conditional expectations. *)
with subcocycle_AE[OF this int2]
obtain w where w: "subcocycle w" "AE x in M. ∀n. w n x = real_cond_exp M Invariants (u n) x"
by blast
have [measurable]: "integrable M (w n)" for n using subcocycle_integrable[OF w(1)] by simp
(* Step 2: w n and u n have the same integral (conditional expectation preserves
   integrals), hence the same asymptotic average. *)
{
fix n::nat
have "(∫x. w n x / n ∂M) = (∫x. real_cond_exp M Invariants (u n) x / n ∂M)"
apply (rule integral_cong_AE) using w(2) by auto
also have "... = (∫x. real_cond_exp M Invariants (u n) x ∂M) / n"
by (rule integral_divide_zero)
also have "... = (∫x. u n x ∂M) / n"
by (simp add: divide_simps real_cond_exp_int(2)[OF int[of n]])
also have "... = (∫x. u n x / n ∂M)"
by (rule integral_divide_zero[symmetric])
finally have "ereal (∫x. w n x / n ∂M) = ereal (∫x. u n x / n ∂M)" by simp
} note * = this
have "(λn. (∫x. u n x / n ∂M)) ⇢ subcocycle_avg_ereal w"
apply (rule Lim_transform_eventually[OF subcocycle_int_tendsto_avg_ereal[OF w(1)]])
using * by auto
then have "subcocycle_avg_ereal u = subcocycle_avg_ereal w"
using subcocycle_int_tendsto_avg_ereal[OF assms(1)] LIMSEQ_unique by auto
then have "subcocycle_avg_ereal w > -∞" using assms(2) by simp
have "subcocycle_avg u = subcocycle_avg w"
using ‹subcocycle_avg_ereal u = subcocycle_avg_ereal w› unfolding subcocycle_avg_def by simp
(* Step 3: subcocycle_lim u ≤ subcocycle_lim w a.e., since the limit of u is
   dominated by each conditioned quotient w N x / N and w N x / N tends to
   subcocycle_lim w. *)
have *: "AE x in M. N > 0 ⟶ subcocycle_lim_ereal u x ≤ real_cond_exp M Invariants (λx. u N x / N) x" for N
by (cases "N = 0", auto simp add: subcocycle_lim_ereal_atmost_uN_invariants[OF assms(1)])
have "AE x in M. ∀N. N > 0 ⟶ subcocycle_lim_ereal u x ≤ real_cond_exp M Invariants (λx. u N x / N) x"
by (subst AE_all_countable, intro allI, simp add: *)
moreover have "AE x in M. subcocycle_lim_ereal u x = ereal(subcocycle_lim u x)"
by (rule subcocycle_lim_real_ereal[OF assms])
moreover have "AE x in M. (λN. u N x / N) ⇢ subcocycle_lim u x"
using kingman_theorem_nonergodic[OF assms] by simp
moreover have "AE x in M. (λN. w N x / N) ⇢ subcocycle_lim w x"
using kingman_theorem_nonergodic[OF w(1) ‹subcocycle_avg_ereal w > -∞› ] by simp
moreover have "AE x in M. ∀n. w n x = real_cond_exp M Invariants (u n) x"
using w(2) by simp
moreover have "AE x in M. ∀n. real_cond_exp M Invariants (u n) x / n = real_cond_exp M Invariants (λx. u n x / n) x"
apply (subst AE_all_countable, intro allI) using AE_symmetric[OF real_cond_exp_cdiv[OF int]] by auto
moreover
{
fix x assume x: "∀N. N > 0 ⟶ subcocycle_lim_ereal u x ≤ real_cond_exp M Invariants (λx. u N x / N) x"
"subcocycle_lim_ereal u x = ereal(subcocycle_lim u x)"
"(λN. u N x / N) ⇢ subcocycle_lim u x"
"(λN. w N x / N) ⇢ subcocycle_lim w x"
"∀n. w n x = real_cond_exp M Invariants (u n) x"
"∀n. real_cond_exp M Invariants (u n) x / n = real_cond_exp M Invariants (λx. u n x / n) x"
{
fix N::nat assume "N≥1"
have "subcocycle_lim u x ≤ real_cond_exp M Invariants (λx. u N x / N) x"
using x(1) x(2) ‹N≥1› by auto
also have "... = real_cond_exp M Invariants (u N) x / N"
using x(6) by simp
also have "... = w N x / N"
using x(5) by simp
finally have "subcocycle_lim u x ≤ w N x / N"
by simp
} note * = this
have "subcocycle_lim u x ≤ subcocycle_lim w x"
apply (rule LIMSEQ_le_const[OF x(4)]) using * by auto
}
ultimately have *: "AE x in M. subcocycle_lim u x ≤ subcocycle_lim w x"
by auto
(* Step 4: equal integrals plus a.e. inequality force a.e. equality of the
   limits; then transfer the convergences of w to the conditional expectations. *)
have **: "(∫x. subcocycle_lim u x ∂M) = (∫x. subcocycle_lim w x ∂M)"
using subcocycle_lim_avg[OF assms] subcocycle_lim_avg[OF w(1) ‹subcocycle_avg_ereal w > -∞›]
‹subcocycle_avg u = subcocycle_avg w› by simp
have AE_eq: "AE x in M. subcocycle_lim u x = subcocycle_lim w x"
by (rule integral_ineq_eq_0_then_AE[OF * kingman_theorem_nonergodic(2)[OF assms]
kingman_theorem_nonergodic(2)[OF w(1) ‹subcocycle_avg_ereal w > -∞›] **])
moreover have "AE x in M. (λn. w n x / n) ⇢ subcocycle_lim w x"
by (rule kingman_theorem_nonergodic(1)[OF w(1) ‹subcocycle_avg_ereal w > -∞›])
moreover have "AE x in M. ∀n. w n x = real_cond_exp M Invariants (u n) x"
using w(2) by auto
moreover
{
fix x assume H: "subcocycle_lim u x = subcocycle_lim w x"
"(λn. w n x / n) ⇢ subcocycle_lim w x"
"∀n. w n x = real_cond_exp M Invariants (u n) x"
then have "(λn. real_cond_exp M Invariants (u n) x / n) ⇢ subcocycle_lim u x"
by auto
}
ultimately show "AE x in M. (λn. real_cond_exp M Invariants (u n) x / n) ⇢ subcocycle_lim u x"
by auto
(* The L1 statement: the nonnegative integrals agree term by term with those of
   w, which tend to 0 by Kingman's theorem for w. *)
{
fix n::nat
have "AE x in M. subcocycle_lim u x = subcocycle_lim w x"
using AE_eq by simp
moreover have "AE x in M. w n x = real_cond_exp M Invariants (u n) x"
using w(2) by auto
moreover
{
fix x assume H: "subcocycle_lim u x = subcocycle_lim w x"
"w n x = real_cond_exp M Invariants (u n) x"
then have "ennreal ¦real_cond_exp M Invariants (u n) x / real n - subcocycle_lim u x¦
= ennreal ¦w n x / real n - subcocycle_lim w x¦"
by auto
}
ultimately have "AE x in M. ennreal ¦real_cond_exp M Invariants (u n) x / real n - subcocycle_lim u x¦
= ennreal ¦w n x / real n - subcocycle_lim w x¦"
by auto
then have "(∫⇧+ x. ennreal ¦real_cond_exp M Invariants (u n) x / real n - subcocycle_lim u x¦ ∂M)
= (∫⇧+ x. ennreal ¦w n x / real n - subcocycle_lim w x¦ ∂M)"
by (rule nn_integral_cong_AE)
}
moreover have "(λn. (∫⇧+ x. ¦w n x / real n - subcocycle_lim w x¦ ∂M)) ⇢ 0"
by (rule kingman_theorem_nonergodic(3)[OF w(1) ‹subcocycle_avg_ereal w > -∞›])
ultimately show "(λn. (∫⇧+ x. ¦real_cond_exp M Invariants (u n) x / real n - subcocycle_lim u x¦ ∂M)) ⇢ 0"
by auto
qed
text ‹Then, we extend it by truncation to the general case, i.e., to the asymptotic
limit in extended reals.›
(* Extension of the previous theorem to a possibly -∞ asymptotic average, by
   truncation from below: for each K, the constant subcocycle v n x = -n*K has
   finite average, so max(u, v) falls under the finite-average theorem. This
   bounds the limsup of the conditioned quotients by max(lim u, -K) for every K;
   letting K → ∞ and combining with a matching liminf lower bound gives
   convergence to the extended-real limit. *)
theorem kingman_theorem_AE_nonergodic_invariant_ereal:
assumes "subcocycle u"
shows "AE x in M. (λn. real_cond_exp M Invariants (u n) x / n) ⇢ subcocycle_lim_ereal u x"
proof -
have [simp]: "subcocycle u" using assms by simp
have int [simp]: "integrable M (u n)" for n using subcocycle_integrable[OF assms(1)] by auto
(* Key estimate: for each truncation level K, the limsup of the conditioned
   quotients is at most max(subcocycle_lim_ereal u x, -K) almost everywhere. *)
have limsup_ineq_K: "AE x in M.
limsup (λn. real_cond_exp M Invariants (u n) x / n) ≤ max (subcocycle_lim_ereal u x) (-real K)" for K::nat
proof -
(* The linear subcocycle v with quotients constantly -K. *)
define v where "v = (λ (n::nat) (x::'a). (-n * real K))"
have [simp]: "subcocycle v"
unfolding v_def subcocycle_def by (auto simp add: algebra_simps)
have "ereal (∫x. v n x / n ∂M) = ereal(- real K * measure M (space M))" if "n≥1" for n
unfolding v_def using that by simp
then have "(λn. ereal (∫x. v n x / n ∂M)) ⇢ ereal(- real K * measure M (space M))"
using lim_explicit by force
moreover have "(λn. ereal (∫x. v n x / n ∂M)) ⇢ subcocycle_avg_ereal v"
using subcocycle_int_tendsto_avg_ereal[OF ‹subcocycle v›] by auto
ultimately have "subcocycle_avg_ereal v = - real K * measure M (space M)"
using LIMSEQ_unique by blast
then have "subcocycle_avg_ereal v > -∞"
by auto
(* The Kingman limit of v is the constant -K. *)
{
fix x assume H: "(λn. v n x / n) ⇢ subcocycle_lim_ereal v x"
have "ereal(v n x / n) = -real K" if "n≥1" for n
unfolding v_def using that by auto
then have "(λn. ereal(v n x / n)) ⇢ - real K"
using lim_explicit by force
then have "subcocycle_lim_ereal v x = -real K"
using H LIMSEQ_unique by blast
}
then have "AE x in M. subcocycle_lim_ereal v x = -real K"
using kingman_theorem_AE_nonergodic_ereal[OF ‹subcocycle v›] by auto
(* The truncated subcocycle w = max(u, v) has finite asymptotic average. *)
define w where "w = (λn x. max (u n x) (v n x))"
have [simp]: "subcocycle w"
unfolding w_def by (rule subcocycle_max, auto)
have "subcocycle_avg_ereal w ≥ subcocycle_avg_ereal v"
unfolding w_def using subcocycle_avg_ereal_max by auto
then have "subcocycle_avg_ereal w > -∞"
using ‹subcocycle_avg_ereal v > -∞› by auto
(* u ≤ w transfers to the conditional expectations by monotonicity. *)
have *: "AE x in M. real_cond_exp M Invariants (u n) x ≤ real_cond_exp M Invariants (w n) x" for n
apply (rule real_cond_exp_mono)
using subcocycle_integrable[OF assms, of n] subcocycle_integrable[OF ‹subcocycle w›, of n] apply auto
unfolding w_def by auto
have "AE x in M. ∀n. real_cond_exp M Invariants (u n) x ≤ real_cond_exp M Invariants (w n) x"
apply (subst AE_all_countable) using * by auto
moreover have "AE x in M. (λn. real_cond_exp M Invariants (w n) x / n) ⇢ subcocycle_lim w x"
apply (rule kingman_theorem_nonergodic_invariant(1))
using ‹subcocycle_avg_ereal w > -∞› by auto
moreover have "AE x in M. subcocycle_lim_ereal w x = max (subcocycle_lim_ereal u x) (subcocycle_lim_ereal v x)"
unfolding w_def using subcocycle_lim_ereal_max by auto
moreover
(* Pointwise: the limsup for u is bounded by the limit for w, which equals
   max(lim u, -K). *)
{
fix x assume H: "(λn. real_cond_exp M Invariants (w n) x / n) ⇢ subcocycle_lim w x"
"subcocycle_lim_ereal w x = max (subcocycle_lim_ereal u x) (subcocycle_lim_ereal v x)"
"subcocycle_lim_ereal v x = - real K"
"∀n. real_cond_exp M Invariants (u n) x ≤ real_cond_exp M Invariants (w n) x"
have "subcocycle_lim_ereal w x > -∞"
using H(2) H(3) MInfty_neq_ereal(1) ereal_MInfty_lessI max.cobounded2 by fastforce
then have "subcocycle_lim_ereal w x = ereal(subcocycle_lim w x)"
unfolding subcocycle_lim_def using subcocycle_lim_ereal_not_PInf[of w x] ereal_real by force
moreover have "(λn. real_cond_exp M Invariants (w n) x / n) ⇢ ereal(subcocycle_lim w x)" using H(1) by auto
ultimately have "(λn. real_cond_exp M Invariants (w n) x / n) ⇢ subcocycle_lim_ereal w x" by auto
then have *: "limsup (λn. real_cond_exp M Invariants (w n) x / n) = subcocycle_lim_ereal w x"
using tendsto_iff_Liminf_eq_Limsup trivial_limit_at_top_linorder by blast
have "ereal(real_cond_exp M Invariants (u n) x / n) ≤ real_cond_exp M Invariants (w n) x / n" for n
using H(4) by (auto simp add: divide_simps)
then have "eventually (λn. ereal(real_cond_exp M Invariants (u n) x / n) ≤ real_cond_exp M Invariants (w n) x / n) sequentially"
by auto
then have "limsup (λn. real_cond_exp M Invariants (u n) x / n) ≤ limsup (λn. real_cond_exp M Invariants (w n) x / n)"
using Limsup_mono[of _ _ sequentially] by force
then have "limsup (λn. real_cond_exp M Invariants (u n) x / n) ≤ max (subcocycle_lim_ereal u x) (-real K)"
using * H(2) H(3) by auto
}
ultimately show ?thesis using ‹AE x in M. subcocycle_lim_ereal v x = -real K› by auto
qed
(* Combine the limsup bounds over all K (countably many, so still a.e.). *)
have "AE x in M. ∀K::nat.
limsup (λn. real_cond_exp M Invariants (u n) x / n) ≤ max (subcocycle_lim_ereal u x) (-real K)"
apply (subst AE_all_countable) using limsup_ineq_K by auto
(* Matching liminf lower bound, from the domination of the limit by each
   conditioned quotient at a fixed time. *)
moreover have "AE x in M. liminf (λn. real_cond_exp M Invariants (u n) x / n) ≥ subcocycle_lim_ereal u x"
proof -
have *: "AE x in M. N > 0 ⟶ subcocycle_lim_ereal u x ≤ real_cond_exp M Invariants (λx. u N x / N) x" for N
by (cases "N = 0", auto simp add: subcocycle_lim_ereal_atmost_uN_invariants[OF assms(1)])
have "AE x in M. ∀N. N > 0 ⟶ subcocycle_lim_ereal u x ≤ real_cond_exp M Invariants (λx. u N x / N) x"
by (subst AE_all_countable, intro allI, simp add: *)
moreover have "AE x in M. ∀n. real_cond_exp M Invariants (λx. u n x / n) x = real_cond_exp M Invariants (u n) x / n"
apply (subst AE_all_countable, intro allI) using real_cond_exp_cdiv by auto
moreover
{
fix x assume x: "∀N. N > 0 ⟶ subcocycle_lim_ereal u x ≤ real_cond_exp M Invariants (λx. u N x / N) x"
"∀n. real_cond_exp M Invariants (λx. u n x / n) x = real_cond_exp M Invariants (u n) x / n"
then have *: "subcocycle_lim_ereal u x ≤ real_cond_exp M Invariants (u n) x / n" if "n ≥ 1" for n
using that by auto
have "subcocycle_lim_ereal u x ≤ liminf (λn. real_cond_exp M Invariants (u n) x / n)"
apply (subst liminf_bounded_iff) using * less_le_trans by blast
}
ultimately show ?thesis by auto
qed
moreover
(* Pointwise conclusion: let K → ∞ in the limsup bound, then liminf = limsup
   forces convergence. *)
{
fix x assume H: "∀K::nat. limsup (λn. real_cond_exp M Invariants (u n) x / n)
≤ max (subcocycle_lim_ereal u x) (-real K)"
"liminf (λn. real_cond_exp M Invariants (u n) x / n) ≥ subcocycle_lim_ereal u x"
have "(λK::nat. max (subcocycle_lim_ereal u x) (-real K)) ⇢ subcocycle_lim_ereal u x"
by (rule ereal_truncation_bottom)
with LIMSEQ_le_const[OF this]
have *: "limsup (λn. real_cond_exp M Invariants (u n) x / n) ≤ subcocycle_lim_ereal u x"
using H(1) by auto
have "(λn. real_cond_exp M Invariants (u n) x / n) ⇢ subcocycle_lim_ereal u x"
apply (subst tendsto_iff_Liminf_eq_Limsup[OF trivial_limit_at_top_linorder])
using H(2) * Liminf_le_Limsup[OF trivial_limit_at_top_linorder, of "(λn. real_cond_exp M Invariants (u n) x / n)"]
by auto
}
ultimately show ?thesis by auto
qed
end
subsection ‹Subcocycles in the ergodic case›
text ‹In this subsection, we describe how all the previous results simplify in the ergodic case.
Indeed, subcocycle limits are almost surely constant, given by the asymptotic average.›
context ergodic_pmpt begin
(* In the ergodic case, the Kingman limit is a.e. constant, and that constant is
   the asymptotic average (in both the extended-real and the real versions).
   Proof: the limit is Invariants-measurable, hence a.e. equal to some constant c
   by ergodicity. If the average is -∞, c is bounded above by every integral of
   u N / N, hence by the average, hence c = -∞. If the average is finite, the
   integral of the limit equals the average (subcocycle_lim_avg), and the
   integral of the constant cr over a probability space is cr itself.
   Formatting fixes only (no semantic change): added the missing space in
   "[OF assms]by" and normalized the cartouche "‹ c = ereal cr ›". *)
lemma subcocycle_ergodic_lim_avg:
assumes "subcocycle u"
shows "AE x in M. subcocycle_lim_ereal u x = subcocycle_avg_ereal u"
"AE x in M. subcocycle_lim u x = subcocycle_avg u"
proof -
have I: "integrable M (u N)" for N using subcocycle_integrable[OF assms] by auto
(* Ergodicity: the Invariants-measurable limit is a.e. constant. *)
obtain c::ereal where c: "AE x in M. subcocycle_lim_ereal u x = c"
using Invariant_func_is_AE_constant[OF subcocycle_lim_meas_Inv(1)] by blast
have "c = subcocycle_avg_ereal u"
proof (cases "subcocycle_avg_ereal u = - ∞")
case True
(* c is dominated by every normalized integral, hence by their limit -∞. *)
{
fix N assume "N > (0::nat)"
have "AE x in M. real_cond_exp M Invariants (λx. u N x / N) x = (∫ x. u N x / N ∂M)"
apply (rule Invariants_cond_exp_is_integral) using I by auto
moreover have "AE x in M. subcocycle_lim_ereal u x ≤ real_cond_exp M Invariants (λx. u N x / N) x"
using subcocycle_lim_ereal_atmost_uN_invariants[OF assms ‹N>0›] by simp
ultimately have "AE x in M. c ≤ (∫x. u N x / N ∂M)"
using c by force
then have "c ≤ (∫x. u N x / N ∂M)" by auto
}
then have "∀N≥1. c ≤ (∫x. u N x / N ∂M)" by auto
with Lim_bounded2[OF subcocycle_int_tendsto_avg_ereal[OF assms] this]
have "c ≤ subcocycle_avg_ereal u" by simp
then show ?thesis using True by auto
next
case False
then have fin: "subcocycle_avg_ereal u > - ∞" by simp
(* The real limit is also a.e. constant, and c is that constant. *)
obtain cr::real where cr: "AE x in M. subcocycle_lim u x = cr"
using Invariant_func_is_AE_constant[OF subcocycle_lim_meas_Inv(2)] by blast
have "AE x in M. c = ereal cr" using c cr subcocycle_lim_real_ereal[OF assms fin] by force
then have "c = ereal cr" by auto
(* Identify cr with the average by integrating the constant over the
   probability space. *)
have "subcocycle_avg u = (∫x. subcocycle_lim u x ∂M)"
using subcocycle_lim_avg[OF assms fin] by auto
also have "... = (∫x. cr ∂M)"
apply (rule integral_cong_AE) using cr by auto
also have "... = cr"
by (simp add: prob_space.prob_space prob_space_axioms)
finally have "ereal(subcocycle_avg u) = ereal cr" by simp
then show ?thesis using ‹c = ereal cr› subcocycle_avg_real_ereal[OF fin] by auto
qed
then show "AE x in M. subcocycle_lim_ereal u x = subcocycle_avg_ereal u" using c by auto
then show "AE x in M. subcocycle_lim u x = subcocycle_avg u"
unfolding subcocycle_lim_def subcocycle_avg_def by auto
qed
(* Ergodic Kingman theorem, extended-real version: the quotients converge a.e.
   to the (constant) asymptotic average. Immediate from the non-ergodic theorem
   and the a.e. constancy of the limit. *)
theorem kingman_theorem_AE_ereal:
assumes "subcocycle u"
shows "AE x in M. (λn. u n x / n) ⇢ subcocycle_avg_ereal u"
using kingman_theorem_AE_nonergodic_ereal[OF assms] subcocycle_ergodic_lim_avg(1)[OF assms] by auto
(* Ergodic Kingman theorem, real version with finite average: a.e. and L1
   convergence of the quotients to the constant asymptotic average, obtained by
   substituting the constant for the limit function in the non-ergodic theorem. *)
theorem kingman_theorem:
assumes "subcocycle u" "subcocycle_avg_ereal u > -∞"
shows "AE x in M. (λn. u n x / n) ⇢ subcocycle_avg u"
"(λn. (∫⇧+x. abs(u n x / n - subcocycle_avg u) ∂M)) ⇢ 0"
proof -
have *: "AE x in M. subcocycle_lim u x = subcocycle_avg u"
using subcocycle_ergodic_lim_avg(2)[OF assms(1)] by auto
then show "AE x in M. (λn. u n x / n) ⇢ subcocycle_avg u"
using kingman_theorem_nonergodic(1)[OF assms] by auto
(* Rewrite the integrand a.e. before reusing the L1 statement. *)
have "(∫⇧+x. abs(u n x / n - subcocycle_avg u) ∂M) = (∫⇧+x. abs(u n x / n - subcocycle_lim u x) ∂M)" for n
apply (rule nn_integral_cong_AE) using * by auto
then show "(λn. (∫⇧+x. abs(u n x / n - subcocycle_avg u) ∂M)) ⇢ 0"
using kingman_theorem_nonergodic(3)[OF assms] by auto
qed
end
subsection ‹Subcocycles for invertible maps›
text ‹If $T$ is invertible, then a subcocycle $u_n$ for $T$ gives rise to another subcocycle
for $T^{-1}$. Intuitively, if $u$ is subadditive along the time interval $[0,n)$, then
it should also be subadditive along the time interval $[-n,0)$. This is true, and
formalized with the following statement.›
(* If T is invertible, a subcocycle u for T yields the subcocycle
   n x ↦ u n ((Tinv^^n) x) for the inverse map Tinv.
   The subadditivity inequality is checked by introducing y = (Tinv^^(m+n)) x,
   so that the original inequality for u at y (split as m + n) translates into
   the required inequality for the new cocycle at x. *)
proposition (in mpt) subcocycle_u_Tinv:
assumes "subcocycle u"
"invertible_qmpt"
shows "mpt.subcocycle M Tinv (λn x. u n (((Tinv)^^n) x))"
proof -
have bij: "bij T" using ‹invertible_qmpt› unfolding invertible_qmpt_def by auto
have int: "integrable M (u n)" for n
using subcocycle_integrable[OF assms(1)] by simp
(* Work in the measure-preserving system for Tinv. *)
interpret I: mpt M Tinv using Tinv_mpt[OF assms(2)] by simp
show "I.subcocycle (λn x. u n (((Tinv)^^n) x))" unfolding I.subcocycle_def
proof(auto)
(* Integrability transfers since Tinv preserves integrals. *)
show "integrable M (λx. u n ((Tinv ^^ n) x))" for n
using I.Tn_integral_preserving(1)[OF int[of n]] by simp
fix n m::nat and x::'a
(* y is the point where the original subadditivity inequality is applied. *)
define y where "y = (Tinv^^(m+n)) x"
have "(T^^m) y = (T^^m) ((Tinv^^m) ((Tinv^^n) x))" unfolding y_def by (simp add: funpow_add)
then have *: "(T^^m) y = (Tinv^^n) x"
using fn_o_inv_fn_is_id[OF bij, of m] by (metis Tinv_def comp_def)
have "u (n + m) ((Tinv ^^ (n + m)) x) = u (m+n) y"
unfolding y_def by (simp add: add.commute[of n m])
also have "... ≤ u m y + u n ((T^^m) y)"
using subcocycle_ineq[OF ‹subcocycle u›, of m n y] by simp
also have "... = u m ((Tinv^^(m+n)) x) + u n ((Tinv^^n) x)"
using * y_def by auto
finally show "u (n + m) ((Tinv ^^ (n + m)) x) ≤ u n ((Tinv ^^ n) x) + u m ((Tinv ^^ m) ((Tinv ^^ n) x))"
by (simp add: funpow_add)
qed
qed
text ‹The subcocycle averages for $T$ and $T^{-1}$ coincide.›
(* The asymptotic averages of the subcocycle for T and of the transported
   subcocycle for Tinv coincide: Tinv preserves integrals, so both averages are
   limits of the same integral sequence. *)
proposition (in mpt) subcocycle_avg_ereal_Tinv:
assumes "subcocycle u"
"invertible_qmpt"
shows "mpt.subcocycle_avg_ereal M (λn x. u n (((Tinv)^^n) x)) = subcocycle_avg_ereal u"
proof -
have bij: "bij T" using ‹invertible_qmpt› unfolding invertible_qmpt_def by auto
have int: "integrable M (u n)" for n
using subcocycle_integrable[OF assms(1)] by simp
interpret I: mpt M Tinv using Tinv_mpt[OF assms(2)] by simp
(* The transported integrals converge to the Tinv-average... *)
have "(λn. (∫x. u n (((Tinv)^^n) x) / n ∂M)) ⇢ I.subcocycle_avg_ereal (λn x. u n (((Tinv)^^n) x))"
using I.subcocycle_int_tendsto_avg_ereal[OF subcocycle_u_Tinv[OF assms]] by simp
(* ...and equal the original integrals by measure invariance of Tinv. *)
moreover have "(∫x. u n x / n ∂M) = ereal (∫x. u n (((Tinv)^^n) x) / n ∂M)" for n
apply (simp)
apply (rule disjI2)
apply (rule I.Tn_integral_preserving(2)[symmetric])
apply (simp add: int)
done
ultimately have "(λn. (∫x. u n x / n ∂M)) ⇢ I.subcocycle_avg_ereal (λn x. u n (((Tinv)^^n) x))"
by presburger
moreover have "(λn. (∫x. u n x / n ∂M)) ⇢ subcocycle_avg_ereal u"
using subcocycle_int_tendsto_avg_ereal[OF ‹subcocycle u›] by simp
ultimately show ?thesis
using LIMSEQ_unique by simp
qed
text ‹The asymptotic limit of the subcocycle is the same for $T$ and $T^{-1}$. This is clear in the
ergodic case, and follows from the ergodic decomposition in the general case (on a standard
probability space). We give a direct proof below (on a general probability space) using the fact
that the asymptotic limit is the same for the subcocycle conditioned by the invariant sigma-algebra,
which is clearly the same for $T$ and $T^{-1}$ as it is constant along orbits.›
(* The Kingman limits for T and Tinv agree almost everywhere. The proof goes
   through the invariant-conditioned version of Kingman's theorem: the
   conditional expectations of u n and of the transported cocycle agree a.e.
   (Invariants_of_foTn, and the invariant algebras of T and Tinv coincide), and
   both limits are a.e. limits of that common sequence. *)
proposition (in fmpt) subcocycle_lim_ereal_Tinv:
assumes "subcocycle u"
"invertible_qmpt"
shows "AE x in M. fmpt.subcocycle_lim_ereal M Tinv (λn x. u n (((Tinv)^^n) x)) x = subcocycle_lim_ereal u x"
proof -
have bij: "bij T" using ‹invertible_qmpt› unfolding invertible_qmpt_def by auto
have int: "integrable M (u n)" for n
using subcocycle_integrable[OF assms(1)] by simp
interpret I: fmpt M Tinv using Tinv_fmpt[OF assms(2)] by simp
(* Conditional expectations of the transported cocycle reduce to those of u. *)
have *: "AE x in M. real_cond_exp M I.Invariants (λ x. u n (((Tinv)^^n) x)) x
= real_cond_exp M I.Invariants (u n) x" for n
using I.Invariants_of_foTn int unfolding o_def by simp
have "AE x in M. ∀n. real_cond_exp M I.Invariants (λ x. u n (((Tinv)^^n) x)) x
= real_cond_exp M I.Invariants (u n) x"
apply (subst AE_all_countable) using * by simp
(* Conditioned Kingman theorem on both sides. *)
moreover have "AE x in M. (λn. real_cond_exp M Invariants (u n) x / n) ⇢ subcocycle_lim_ereal u x"
using kingman_theorem_AE_nonergodic_invariant_ereal[OF ‹subcocycle u›] by simp
moreover have "AE x in M. (λn. real_cond_exp M I.Invariants (λ x. u n (((Tinv)^^n) x)) x / n)
⇢ I.subcocycle_lim_ereal (λ n x. u n (((Tinv)^^n) x)) x"
using I.kingman_theorem_AE_nonergodic_invariant_ereal[OF subcocycle_u_Tinv[OF assms]] by simp
moreover
(* Pointwise: the two limits are limits of the same sequence (the invariant
   sigma-algebras of T and Tinv coincide), hence equal by uniqueness. *)
{
fix x assume H: "∀n. real_cond_exp M I.Invariants (λ x. u n (((Tinv)^^n) x)) x
= real_cond_exp M I.Invariants (u n) x"
"(λn. real_cond_exp M Invariants (u n) x / n) ⇢ subcocycle_lim_ereal u x"
"(λn. real_cond_exp M I.Invariants (λ x. u n (((Tinv)^^n) x)) x / n)
⇢ I.subcocycle_lim_ereal (λ n x. u n (((Tinv)^^n) x)) x"
have "ereal(real_cond_exp M Invariants (u n) x / n)
= ereal(real_cond_exp M I.Invariants (λ x. u n (((Tinv)^^n) x)) x / n)" for n
using H(1) Invariants_Tinv[OF ‹invertible_qmpt›] by auto
then have "(λn. real_cond_exp M Invariants (u n) x / n)
⇢ I.subcocycle_lim_ereal (λ n x. u n (((Tinv)^^n) x)) x"
using H(3) by presburger
then have "I.subcocycle_lim_ereal (λ n x. u n (((Tinv)^^n) x)) x = subcocycle_lim_ereal u x"
using H(2) LIMSEQ_unique by auto
}
ultimately show ?thesis by auto
qed
(* Real-valued counterpart of subcocycle_lim_ereal_Tinv: the subcocycle limit for the
   inverse map agrees almost everywhere with the one for T.  It follows directly by
   unfolding the definitions of subcocycle_lim and applying the ereal statement. *)
proposition (in fmpt) subcocycle_lim_Tinv:
assumes "subcocycle u"
"invertible_qmpt"
shows "AE x in M. fmpt.subcocycle_lim M Tinv (λn x. u n (((Tinv)^^n) x)) x = subcocycle_lim u x"
proof -
interpret I: fmpt M Tinv using Tinv_fmpt[OF assms(2)] by simp
show ?thesis
unfolding subcocycle_lim_def I.subcocycle_lim_def
using subcocycle_lim_ereal_Tinv[OF assms] by auto
qed
end
Theory Gouezel_Karlsson
section ‹Gouezel-Karlsson›
theory Gouezel_Karlsson
imports Asymptotic_Density Kingman
begin
text ‹This section is devoted to the proof of the main ergodic result of
the article "Subadditive and multiplicative ergodic theorems" by Gouezel and
Karlsson~\cite{gouezel_karlsson}. It is a version of Kingman
theorem ensuring that, for subadditive cocycles, there are almost surely
many times $n$ where the cocycle is nearly additive at \emph{all} times
between $0$ and $n$.
This theorem is then used in this article to construct horofunctions
characterizing the behavior at infinity of compositions
of semi-contractions. This requires too many further notions to be implemented
in current Isabelle/HOL, but the main ergodic result is completely
proved below, in Theorem~\verb+Gouezel_Karlsson+, following the arguments in the paper (but in a
slightly more general setting here as we are not making any ergodicity assumption).
To simplify the exposition, the theorem is proved assuming that the limit of the subcocycle
vanishes almost everywhere, in the locale \verb+Gouezel_Karlsson_Kingman+.
The final result is proved by an easy reduction to this case.
The main steps of the proof are as follows:
\begin{itemize}
\item assume first that the map is invertible, and consider the inverse map and the corresponding
inverse subcocycle. With combinatorial arguments that only work for this inverse subcocycle, we
control the density of bad times given some allowed error $d>0$, in a precise quantitative way, in
Lemmas~\verb+upper_density_all_times+ and~\verb+upper_density_large_k+. We put these estimates
together in Lemma~\verb+upper_density_delta+.
\item These estimates are then transferred to the original time direction and the original subcocycle in
Lemma~\verb+upper_density_good_direction_invertible+. The fact that we have quantitative estimates
in terms of asymptotic densities is central here, just having some infiniteness statement would not be
enough.
\item The invertibility assumption is removed in Lemma~\verb+upper_density_good_direction+ by
using the result in the natural extension.
\item Finally, the main result is deduced in Lemma~\verb+infinite_AE+ (still assuming that the
asymptotic limit vanishes almost everywhere), and in full generality in
Theorem~\verb+Gouezel_Karlsson_Kingman+.
\end{itemize}
›
(* From a measure lower bound b on the set where the upper asymptotic density of
   {n. P x n} is below a, extract one uniform threshold N such that, on a set of
   measure still larger than b, the cardinality bound card({n. P x n} ∩ {..<n}) < a*n
   holds for every n ≥ N.  The point is monotone convergence of the measures of the
   increasing sets H N towards the measure of their union, which contains G. *)
lemma upper_density_eventually_measure:
fixes a::real
assumes [measurable]: "⋀n. {x ∈ space M. P x n} ∈ sets M"
and "emeasure M {x ∈ space M. upper_asymptotic_density {n. P x n} < a} > b"
shows "∃N. emeasure M {x ∈ space M. ∀n ≥ N. card ({n. P x n} ∩ {..<n}) < a * n} > b"
proof -
(* G: points with small upper density; H N: points where the cardinality bound
   holds from time N on. *)
define G where "G = {x ∈ space M. upper_asymptotic_density {n. P x n} < a}"
define H where "H = (λN. {x ∈ space M. ∀n ≥ N. card ({n. P x n} ∩ {..<n}) < a * n})"
have [measurable]: "G ∈ sets M" "⋀N. H N ∈ sets M" unfolding G_def H_def by auto
(* Every point of G eventually satisfies the cardinality bound, hence lies in some H N. *)
have "G ⊆ (⋃N. H N)"
proof
fix x assume "x ∈ G"
then have "x ∈ space M" unfolding G_def by simp
have "eventually (λn. card({n. P x n} ∩ {..<n}) < a * n) sequentially"
using ‹x ∈ G› unfolding G_def using upper_asymptotic_densityD by auto
then obtain N where "⋀n. n ≥ N ⟹ card({n. P x n} ∩ {..<n}) < a * n"
using eventually_sequentially by auto
then have "x ∈ H N" unfolding H_def using ‹x ∈ space M› by auto
then show "x ∈ (⋃N. H N)" by blast
qed
have "b < emeasure M G" using assms(2) unfolding G_def by simp
also have "... ≤ emeasure M (⋃N. H N)"
apply (rule emeasure_mono) using ‹G ⊆ (⋃N. H N)› by auto
finally have "emeasure M (⋃N. H N) > b" by simp
(* The H N increase with N, so their measures converge to that of the union;
   eventually one H N already has measure > b. *)
moreover have "(λN. emeasure M (H N)) ⇢ emeasure M (⋃N. H N)"
apply (rule Lim_emeasure_incseq) unfolding H_def incseq_def by auto
ultimately have "eventually (λN. emeasure M (H N) > b) sequentially"
by (simp add: order_tendsto_iff)
then obtain N where "emeasure M (H N) > b"
using eventually_False_sequentially eventually_mono by blast
then show ?thesis unfolding H_def by blast
qed
locale Gouezel_Karlsson_Kingman = pmpt +
fixes u::"nat ⇒ 'a ⇒ real"
assumes subu: "subcocycle u"
and subu_fin: "subcocycle_avg_ereal u > -∞"
and subu_0: "AE x in M. subcocycle_lim u x = 0"
begin
(* Each u n is integrable: this is part of the definition of a subcocycle. *)
lemma int_u [measurable]:
"integrable M (u n)"
using subu unfolding subcocycle_def by auto
text ‹Next lemma is Lemma 2.1 in~\cite{gouezel_karlsson}.›
(* Lemma 2.1 of Gouezel-Karlsson: for any error d > 0 there is a slope c > 0 such that,
   with measure > 1 - d, the set of times n admitting some 1 ≤ l ≤ n with
   u n x - u (n-l) x ≤ -c*l has upper asymptotic density < d.  The key estimate I
   below bounds this density by E(|u 1| | Invariants) / c, and c is then chosen large
   enough by a monotone convergence argument. *)
lemma upper_density_all_times:
assumes "d > (0::real)"
shows "∃c> (0::real).
emeasure M {x ∈ space M. upper_asymptotic_density {n. ∃l ∈ {1..n}. u n x - u (n-l) x ≤ - c * l} < d} > 1 - d"
proof -
(* f controls one-step increments of the subcocycle. *)
define f where "f = (λx. abs (u 1 x))"
have [measurable]: "f ∈ borel_measurable M" unfolding f_def by auto
(* G: full-measure set where both Birkhoff averages of f and u n / n converge. *)
define G where "G = {x ∈ space M. (λn. birkhoff_sum f n x / n) ⇢ real_cond_exp M Invariants f x
∧ (λn. u n x / n) ⇢ 0}"
have [measurable]: "G ∈ sets M" unfolding G_def by auto
have "AE x in M. (λn. birkhoff_sum f n x / n) ⇢ real_cond_exp M Invariants f x"
apply (rule birkhoff_theorem_AE_nonergodic) using subu unfolding f_def subcocycle_def by auto
moreover have "AE x in M. (λn. u n x / n) ⇢ 0"
using subu_0 kingman_theorem_nonergodic(1)[OF subu subu_fin] by auto
ultimately have "AE x in M. x ∈ G" unfolding G_def by auto
then have "emeasure M G = 1" by (simp add: emeasure_eq_1_AE)
(* V c x: the bad times for slope c at the point x. *)
define V where "V = (λc x. {n. ∃l ∈ {1..n}. u n x - u (n-l) x ≤ - c * l})"
define Good where "Good = (λc. {x ∈ G. upper_asymptotic_density (V c x) < d})"
have [measurable]: "Good c ∈ sets M" for c unfolding Good_def V_def by auto
(* Central estimate: on G, the upper density of V c x is at most
   E(f | Invariants)(x) / c. *)
have I: "upper_asymptotic_density (V c x) ≤ real_cond_exp M Invariants f x / c" if "c>0" "x ∈ G" for c x
proof -
have [simp]: "c>0" "c ≠ 0" "c ≥ 0" using ‹c>0› by auto
(* U n is an upper bound for u n x carrying an explicit -c * card(...) term;
   proved by strong induction on n, splitting on whether n is a bad time. *)
define U where "U = (λn. abs(u 0 x) + birkhoff_sum f n x - c * card (V c x ∩ {1..n}))"
have main: "u n x ≤ U n" for n
proof (rule nat_less_induct)
fix n assume H: "∀m<n. u m x ≤ U m"
consider "n = 0" | "n≥1 ∧ n ∉ V c x" | "n≥1 ∧ n ∈ V c x" by linarith
then show "u n x ≤ U n"
proof (cases)
assume "n = 0"
then show ?thesis unfolding U_def by auto
next
(* Good time: step from n-1 using subadditivity, the card term is unchanged. *)
assume A: "n≥1 ∧ n ∉ V c x"
then have "n ≥ 1" by simp
then have "n-1<n" by simp
have "{1..n} = {1..n-1} ∪ {n}" using ‹1 ≤ n› atLeastLessThanSuc by auto
then have *: "card (V c x ∩ {1..n}) = card (V c x ∩ {1..n-1})" using A by auto
have "u n x ≤ u (n-1) x + u 1 ((T^^(n-1)) x)"
using ‹n≥1› subu unfolding subcocycle_def by (metis le_add_diff_inverse2)
also have "... ≤ U (n-1) + f ((T^^(n-1)) x)" unfolding f_def using H ‹n-1<n› by auto
also have "... = abs(u 0 x) + birkhoff_sum f (n-1) x + f ((T^^(n-1)) x) - c * card (V c x ∩ {1..n-1})"
unfolding U_def by auto
also have "... = abs(u 0 x) + birkhoff_sum f n x - c * card (V c x ∩ {1..n})"
using * birkhoff_sum_cocycle[of f "n-1" 1 x] ‹1 ≤ n› by auto
also have "... = U n" unfolding U_def by simp
finally show ?thesis by auto
next
(* Bad time: jump back by l, paying -c*l, which covers the at most l new bad
   times between n-l+1 and n. *)
assume B: "n≥1 ∧ n ∈ V c x"
then obtain l where l: "l∈{1..n}" "u n x - u (n-l) x ≤ - c * l" unfolding V_def by blast
then have "n-l < n" by simp
have m: "- (r * ra) - r * rb = - (r * (rb + ra))" for r ra rb::real
by (simp add: algebra_simps)
have "card(V c x ∩ {1..n}) ≤ card ((V c x ∩ {1..n-l}) ∪ {n-l+1..n})"
by (rule card_mono, auto)
also have "... ≤ card (V c x ∩ {1..n-l}) + card {n-l+1..n}"
by (rule card_Un_le)
also have "... ≤ card (V c x ∩ {1..n-l}) + l" by auto
finally have "card(V c x ∩ {1..n}) ≤ card (V c x ∩ {1..n-l}) + real l" by auto
then have *: "-c * card (V c x ∩ {1..n-l}) - c * l ≤ -c * card(V c x ∩ {1..n})"
using m by auto
have "birkhoff_sum f ((n-l) + l) x = birkhoff_sum f (n-l) x + birkhoff_sum f l ((T^^(n-l))x)"
by (rule birkhoff_sum_cocycle)
moreover have "birkhoff_sum f l ((T^^(n-l))x) ≥ 0"
unfolding f_def birkhoff_sum_def using sum_nonneg by auto
ultimately have **: "birkhoff_sum f (n-l) x ≤ birkhoff_sum f n x" using l(1) by auto
have "u n x ≤ u (n-l) x - c * l" using l by simp
also have "... ≤ U (n-l) - c* l" using H ‹n-l < n› by auto
also have "... = abs(u 0 x) + birkhoff_sum f (n-l) x - c * card (V c x ∩ {1..n-l}) - c*l"
unfolding U_def by auto
also have "... ≤ abs(u 0 x) + birkhoff_sum f n x - c * card (V c x ∩ {1..n})"
using * ** by simp
finally show ?thesis unfolding U_def by auto
qed
qed
(* Divide the bound u n x ≤ U n by n and take limsup: since u n x / n → 0 and the
   Birkhoff averages converge on G, this bounds the density of bad times. *)
have "(λn. abs(u 0 x) * (1/n) + birkhoff_sum f n x / n - u n x / n) ⇢ abs(u 0 x) * 0 + real_cond_exp M Invariants f x - 0"
apply (intro tendsto_intros) using ‹x ∈ G› unfolding G_def by auto
moreover have "(abs(u 0 x) + birkhoff_sum f n x - u n x)/n = abs(u 0 x) * (1/n) + birkhoff_sum f n x / n - u n x / n" for n
by (auto simp add: add_divide_distrib diff_divide_distrib)
ultimately have "(λn. (abs(u 0 x) + birkhoff_sum f n x - u n x)/n) ⇢ real_cond_exp M Invariants f x"
by auto
then have a: "limsup (λn. (abs(u 0 x) + birkhoff_sum f n x - u n x)/n) = real_cond_exp M Invariants f x"
by (simp add: assms lim_imp_Limsup)
have "c * card (V c x ∩ {1..n})/n ≤ (abs(u 0 x) + birkhoff_sum f n x - u n x)/n" for n
using main[of n] unfolding U_def by (simp add: divide_right_mono)
then have "limsup (λn. c * card (V c x ∩ {1..n})/n) ≤ limsup (λn. (abs(u 0 x) + birkhoff_sum f n x - u n x)/n)"
by (simp add: Limsup_mono)
then have b: "limsup (λn. c * card (V c x ∩ {1..n})/n) ≤ real_cond_exp M Invariants f x"
using a by simp
have "ereal(upper_asymptotic_density (V c x)) = limsup (λn. card (V c x ∩ {1..n})/n)"
using upper_asymptotic_density_shift[of "V c x" 1 0] by auto
also have "... = limsup (λn. ereal(1/c) * ereal(c * card (V c x ∩ {1..n})/n))"
by auto
also have "... = (1/c) * limsup (λn. c * card (V c x ∩ {1..n})/n)"
by (rule limsup_ereal_mult_left, auto)
also have "... ≤ ereal (1/c) * real_cond_exp M Invariants f x"
by (rule ereal_mult_left_mono[OF b], auto)
finally show "upper_asymptotic_density (V c x) ≤ real_cond_exp M Invariants f x / c"
by auto
qed
(* Archimedean step: any real r is < d * (c+1) for some natural c, so the sets below
   exhaust G when c grows. *)
{
fix r::real
obtain c::nat where "r / d < c" using reals_Archimedean2 by auto
then have "r/d < real c+1" by auto
then have "r / (real c+1) < d" using ‹d>0› by (simp add: divide_less_eq mult.commute)
then have "∃c::nat. r / (real c+1) < d" by auto
}
then have unG: "(⋃c::nat. {x ∈ G. real_cond_exp M Invariants f x / (c+1) < d}) = G"
by auto
have *: "r < d * (real n + 1)" if "m ≤ n" "r < d * (real m + 1)" for m n r
proof -
have "d * (real m + 1) ≤ d * (real n + 1)" using ‹d>0› ‹m ≤ n› by auto
then show ?thesis using ‹r < d * (real m + 1)› by auto
qed
(* Monotone convergence in c: the measures of the increasing sets converge to
   emeasure M G = 1, so some c0 already gives measure > 1 - d. *)
have "(λc. emeasure M {x ∈ G. real_cond_exp M Invariants f x / (real c+1) < d})
⇢ emeasure M (⋃c::nat. {x ∈ G. real_cond_exp M Invariants f x / (c+1) < d})"
apply (rule Lim_emeasure_incseq) unfolding incseq_def by (auto simp add: divide_simps *)
then have "(λc. emeasure M {x ∈ G. real_cond_exp M Invariants f x / (real c+1) < d}) ⇢ emeasure M G"
using unG by auto
then have "(λc. emeasure M {x ∈ G. real_cond_exp M Invariants f x / (real c+1) < d}) ⇢ 1"
using ‹emeasure M G = 1› by simp
then have "eventually (λc. emeasure M {x ∈ G. real_cond_exp M Invariants f x / (real c+1) < d} > 1 - d) sequentially"
apply (rule order_tendstoD)
apply (insert ‹0<d›, auto simp add: ennreal_1[symmetric] ennreal_lessI simp del: ennreal_1)
done
then obtain c0 where c0: "emeasure M {x ∈ G. real_cond_exp M Invariants f x / (real c0+1) < d} > 1 - d"
using eventually_sequentially by auto
define c where "c = real c0 + 1"
then have "c > 0" by auto
have *: "emeasure M {x ∈ G. real_cond_exp M Invariants f x / c < d} > 1 - d"
unfolding c_def using c0 by auto
(* Transfer the measure bound through the density estimate I to conclude. *)
have "{x ∈ G. real_cond_exp M Invariants f x / c < d} ⊆ {x ∈ space M. upper_asymptotic_density (V c x) < d}"
apply auto
using G_def apply blast
using I[OF ‹c>0›] by fastforce
then have "emeasure M {x ∈ G. real_cond_exp M Invariants f x / c < d} ≤ emeasure M {x ∈ space M. upper_asymptotic_density (V c x) < d}"
apply (rule emeasure_mono) unfolding V_def by auto
then have "emeasure M {x ∈ space M. upper_asymptotic_density (V c x) < d} > 1 - d" using * by auto
then show ?thesis unfolding V_def using ‹c>0› by auto
qed
text ‹Next lemma is Lemma 2.2 in~\cite{gouezel_karlsson}.›
lemma upper_density_large_k:
assumes "d > (0::real)" "d ≤ 1"
shows "∃k::nat.
emeasure M {x ∈ space M. upper_asymptotic_density {n. ∃l ∈ {k..n}. u n x - u (n-l) x ≤ - d * l} < d} > 1 - d"
proof -
have [simp]: "d>0" "d ≠ 0" "d ≥ 0" using ‹d>0› by auto
define rho where "rho = d * d * d / 4"
have [simp]: "rho > 0" "rho ≠ 0" "rho ≥ 0" unfolding rho_def using assms by auto
text ‹First step: choose a time scale $s$ at which all the computations will be done. The
integral of $u_s$ should be suitably small -- how small precisely is given by $\rho$.›
have "ennreal(∫x. abs(u n x / n) ∂M) = (∫⇧+x. abs(u n x /n - subcocycle_lim u x) ∂M)" for n
proof -
have "ennreal(∫x. abs(u n x / n) ∂M) = (∫⇧+x. abs(u n x /n) ∂M)"
apply (rule nn_integral_eq_integral[symmetric]) using int_u by auto
also have "... = (∫⇧+x. abs(u n x /n - subcocycle_lim u x) ∂M)"
apply (rule nn_integral_cong_AE) using subu_0 by auto
finally show ?thesis by simp
qed
moreover have "(λn. ∫⇧+x. abs(u n x /n - subcocycle_lim u x) ∂M) ⇢ 0"
by (rule kingman_theorem_nonergodic(3)[OF subu subu_fin])
ultimately have "(λn. ennreal(∫x. abs(u n x / n) ∂M)) ⇢ 0"
by auto
then have "(λn. (∫x. abs(u n x / n) ∂M)) ⇢ 0"
by (simp add: ennreal_0[symmetric] del: ennreal_0)
then have "eventually (λn. (∫x. abs(u n x / n) ∂M) < rho) sequentially"
by (rule order_tendstoD(2), auto)
then obtain s::nat where [simp]: "s>0" and s_int: "(∫x. abs(u s x / s) ∂M) < rho"
by (metis (mono_tags, lifting) neq0_conv eventually_sequentially gr_implies_not0
linorder_not_le of_nat_0_eq_iff order_refl zero_neq_one)
text ‹Second step: a truncation argument, to decompose $|u_1|$ as a sum of a constant (its
contribution will be small if $k$ is large at the end of the argument) and of a function with
small integral.›
have "(λn. (∫x. abs(u 1 x) * indicator {x ∈ space M. abs(u 1 x) ≥ n} x ∂M)) ⇢ (∫x. 0 ∂M)"
proof (rule integral_dominated_convergence[where ?w = "λx. abs(u 1 x)"])
show "AE x in M. norm (abs(u 1 x) * indicator {x ∈ space M. abs(u 1 x) ≥ n} x) ≤ abs(u 1 x)" for n
unfolding indicator_def by auto
{
fix x
have "abs(u 1 x) * indicator {x ∈ space M. abs(u 1 x) ≥ n} x = (0::real)" if "n > abs(u 1 x)" for n::nat
unfolding indicator_def using that by auto
then have "eventually (λn. abs(u 1 x) * indicator {x ∈ space M. abs(u 1 x) ≥ n} x = 0) sequentially"
by (metis (mono_tags, lifting) eventually_at_top_linorder reals_Archimedean2 less_le_trans of_nat_le_iff)
then have "(λn. abs(u 1 x) * indicator {x ∈ space M. abs(u 1 x) ≥ n} x) ⇢ 0"
by (rule tendsto_eventually)
}
then show "AE x in M. (λn. abs(u 1 x) * indicator {x ∈ space M. abs(u 1 x) ≥ n} x) ⇢ 0"
by simp
qed (auto simp add: int_u)
then have "eventually (λn. (∫x. abs(u 1 x) * indicator {x ∈ space M. abs(u 1 x) ≥ n} x ∂M) < rho) sequentially"
by (rule order_tendstoD(2), auto)
then obtain Knat::nat where Knat: "Knat > 0" "(∫x. abs(u 1 x) * indicator {x ∈ space M. abs(u 1 x) ≥ Knat} x ∂M) < rho"
by (metis (mono_tags, lifting) eventually_sequentially gr_implies_not0 neq0_conv
linorder_not_le of_nat_0_eq_iff order_refl zero_neq_one)
define K where "K = real Knat"
then have [simp]: "K ≥ 0" "K>0" and K: "(∫x. abs(u 1 x) * indicator {x ∈ space M. abs(u 1 x) ≥ K} x ∂M) < rho"
using Knat by auto
define F where "F = (λx. abs(u 1 x) * indicator {x. abs(u 1 x) ≥ K} x)"
have int_F [measurable]: "integrable M F"
unfolding F_def apply (rule Bochner_Integration.integrable_bound[where ?f = "λx. abs(u 1 x)"])
unfolding indicator_def by (auto simp add: int_u)
have "(∫x. F x ∂M) = (∫x. abs(u 1 x) * indicator {x ∈ space M. abs(u 1 x) ≥ K} x ∂M)"
apply (rule integral_cong_AE) unfolding F_def by (auto simp add: indicator_def)
then have F_int: "(∫x. F x ∂M) < rho" using K by auto
have F_pos: "F x ≥ 0" for x unfolding F_def by auto
have u1_bound: "abs(u 1 x) ≤ K + F x" for x
unfolding F_def indicator_def apply (cases "x ∈ {x ∈ space M. K ≤ ¦u 1 x¦}") by auto
define F2 where "F2 = (λx. F x + abs(u s x/s))"
have int_F2 [measurable]: "integrable M F2"
unfolding F2_def using int_F int_u[of s] by auto
have F2_pos: "F2 x ≥ 0" for x unfolding F2_def using F_pos by auto
have "(∫x. F2 x ∂M) = (∫x. F x ∂M) + (∫x. abs(u s x/s) ∂M)"
unfolding F2_def apply (rule Bochner_Integration.integral_add) using int_F int_u by auto
then have F2_int: "(∫x. F2 x ∂M) < 2 * rho"
using F_int s_int by auto
text ‹We can now choose $k$, large enough. The reason for our choice will only appear
at the end of the proof.›
define k where "k = max (2 * s + 1) (nat(ceiling((2 * d * s + 2 * K * s)/(d/2))))"
have "k > 2 * s" unfolding k_def by auto
have "k ≥ (2 * d * s + 2 * K * s)/(d/2)"
unfolding k_def by linarith
then have "(2 * d * s + 2 * K * s)/k ≤ d/2"
using ‹k > 2 * s› by (simp add: divide_simps mult.commute)
text ‹Third step: definition of a good set $G$ where everything goes well.›
define G where "G = {x ∈ space M. (λn. u n x / n) ⇢ 0
∧ (λn. birkhoff_sum (λx. abs(u s x / s)) n x / n) ⇢ real_cond_exp M Invariants (λx. abs(u s x / s)) x
∧ (λn. birkhoff_sum F n x / n) ⇢ real_cond_exp M Invariants F x
∧ real_cond_exp M Invariants F x + real_cond_exp M Invariants (λx. abs(u s x / s)) x = real_cond_exp M Invariants F2 x}"
have [measurable]: "G ∈ sets M" unfolding G_def by auto
have "AE x in M. (λn. u n x / n) ⇢ 0"
using kingman_theorem_nonergodic(1)[OF subu subu_fin] subu_0 by auto
moreover have "AE x in M.(λn. birkhoff_sum (λx. abs(u s x / s)) n x / n) ⇢ real_cond_exp M Invariants (λx. abs(u s x / s)) x"
apply (rule birkhoff_theorem_AE_nonergodic) using int_u[of s] by auto
moreover have "AE x in M. (λn. birkhoff_sum F n x / n) ⇢ real_cond_exp M Invariants F x"
by (rule birkhoff_theorem_AE_nonergodic[OF int_F])
moreover have "AE x in M. real_cond_exp M Invariants F x + real_cond_exp M Invariants (λx. abs(u s x / s)) x = real_cond_exp M Invariants F2 x"
unfolding F2_def apply (rule AE_symmetric[OF real_cond_exp_add]) using int_u[of s] int_F int_u[of s] by auto
ultimately have "AE x in M. x ∈ G" unfolding G_def by auto
then have "emeasure M G = 1" by (simp add: emeasure_eq_1_AE)
text ‹Estimation of asymptotic densities of bad times, for points in $G$.
There is a trivial part, named $U$ below, that has to be treated separately because it creates
problematic boundary effects.›
define U where "U = (λx. {n. ∃l ∈ {n-s<..n}. u n x - u (n-l) x ≤ - d * l})"
define V where "V = (λx. {n. ∃l ∈ {k..n-s}. u n x - u (n-l) x ≤ - d * l})"
text ‹Trivial estimate for $U(x)$: this set is finite for $x\in G$.›
have densU: "upper_asymptotic_density (U x) = 0" if "x ∈ G" for x
proof -
define C where "C = Max {abs(u m x) |m. m<s} + d * s"
have *: "U x ⊆ {n. u n x ≤ C - d * n}"
proof (auto)
fix n assume "n ∈ U x"
then obtain l where l: "l∈ {n-s <..n}" "u n x - u (n-l) x ≤ - d * l" unfolding U_def by auto
define m where "m = n-l"
have "m < s" unfolding m_def using l by auto
have "u n x ≤ u m x - d * l" using l m_def by auto
also have "... ≤ abs(u m x) - d * n + d * m" unfolding m_def using l
by (simp add: algebra_simps of_nat_diff)
also have "... ≤ Max {abs(u m x) |m. m<s} - d * n + d * m"
using ‹m < s› apply (auto) by (rule Max_ge, auto)
also have "... ≤ Max {abs(u m x) |m. m<s} + d * s - d * n"
using ‹m < s› ‹d>0› by auto
finally show "u n x ≤ C - d * n"
unfolding C_def by auto
qed
have "eventually (λn. u n x / n > -d/2) sequentially"
apply (rule order_tendstoD(1)) using ‹x ∈ G› ‹d>0› unfolding G_def by auto
then obtain N where N: "⋀n. n ≥ N ⟹ u n x / n > - d/2"
using eventually_sequentially by auto
{
fix n assume *: "u n x ≤ C - d * n" "n > N"
then have "n ≥ N" "n > 0" by auto
have "2 * u n x ≤ 2 * C - 2 * d * n" using * by auto
moreover have "2 * u n x ≥ - d * n" using N[OF ‹n ≥ N›] ‹n > 0› by (simp add: divide_simps)
ultimately have "- d * n ≤ 2 * C - 2 * d * n" by auto
then have "d * n ≤ 2 * C" by auto
then have "n ≤ 2 * C / d" using ‹d>0› by (simp add: mult.commute divide_simps)
}
then have "{n. u n x ≤ C - d * n} ⊆ {..max (nat (floor(2*C/d))) N}"
by (auto, meson le_max_iff_disj le_nat_floor not_le)
then have "finite {n. u n x ≤ C - d * n}"
using finite_subset by blast
then have "finite (U x)" using * finite_subset by blast
then show ?thesis using upper_asymptotic_density_finite by auto
qed
text ‹Main step: control of $u$ along the sequence $ns+t$, with a term
$-(d/2) Card(V(x) \cap [1,ns+t])$ on the right.
Then, after averaging in $t$, Birkhoff theorem will imply that
$Card(V(x) \cap [1,n])$ is suitably small.›
define Z where "Z = (λt n x. Max {u i x|i. i< s} + (∑i<n. abs(u s ((T^^(i * s + t))x)))
+ birkhoff_sum F (n * s + t) x - (d/2) * card(V x ∩ {1..n * s + t}))"
have Main: "u (n * s + t) x ≤ Z t n x" if "t < s" for n x t
proof (rule nat_less_induct[where ?n = n])
fix n assume H: "∀m<n. u (m * s + t) x ≤ Z t m x"
consider "n = 0"|"n>0 ∧ V x ∩ {(n-1) * s+t<..n * s+t} = {}"|"n>0 ∧ V x ∩ {(n-1) * s+t<..n * s+t} ≠ {}" by auto
then show "u (n * s+t) x ≤ Z t n x"
proof (cases)
assume "n = 0"
then have "V x ∩ {1..n * s + t} = {}" unfolding V_def using ‹t<s› ‹k>2* s› by auto
then have *: "card(V x ∩ {1..n * s+t}) = 0" by simp
have **: "0 ≤ (∑i<t. F ((T ^^ i) x))" by (rule sum_nonneg, simp add: F_pos)
have "u (n * s + t) x = u t x" using ‹n = 0› by auto
also have "... ≤ Max {u i x|i. i< s}" by (rule Max_ge, auto simp add: ‹t<s›)
also have "... ≤ Z t n x"
unfolding Z_def birkhoff_sum_def using ‹n = 0› * ** by auto
finally show ?thesis by simp
next
assume A: "n>0 ∧ V x ∩ {(n-1) * s+t<..n * s+t} = {}"
then have "n≥1" by simp
have "n * s + t = (n-1) * s + t + s" using ‹n≥1› by (simp add: add.commute add.left_commute mult_eq_if)
have "V x ∩ {1..n * s + t} = V x ∩ {1..(n-1) * s + t} ∪ V x ∩ {(n-1) * s + t<..n * s + t}"
using ‹n≥1› by (auto, simp add: mult_eq_if)
then have *: "card(V x ∩ {1..n * s+t}) = card(V x ∩ {1..(n-1) * s+t})" using A by auto
have **: "birkhoff_sum F ((n-1) * s + t) x ≤ birkhoff_sum F (n * s + t) x"
unfolding birkhoff_sum_def apply (rule sum_mono2)
using ‹n * s+t = (n-1) * s+t + s› F_pos by auto
have "(∑i<n-1. abs(u s ((T^^(i * s+t))x))) + u s ((T^^((n-1) * s+t)) x)
≤ (∑i<n-1. abs(u s ((T^^(i * s+t))x))) + abs(u s ((T^^((n-1) * s+t)) x))" by auto
also have "... ≤ (∑i<n. abs(u s ((T^^(i* s+t))x)))"
using ‹n≥1› lessThan_Suc_atMost sum.lessThan_Suc[of "λi. abs(u s ((T^^(i* s+t))x))" "n-1", symmetric] by auto
finally have ***: "(∑i<n-1. abs(u s ((T^^(i* s+t))x))) + u s ((T^^((n-1) * s+t)) x) ≤ (∑i<n. abs(u s ((T^^(i* s+t))x)))"
by simp
have "u (n * s+t) x = u ((n-1) * s+t + s) x"
using ‹n≥1› by (simp add: add.commute add.left_commute mult_eq_if)
also have "... ≤ u ((n-1) * s+t) x + u s ((T^^((n-1) * s+t)) x)"
using subcocycle_ineq[OF subu, of "(n-1) * s+t" s x] by simp
also have "... ≤ Max {u i x|i. i< s} + (∑i<n-1. abs(u s ((T^^(i* s+t))x)))
+ birkhoff_sum F ((n-1) * s+t) x - (d/2) * card(V x ∩ {1..(n-1) * s+t}) + u s ((T^^((n-1) * s+t)) x)"
using H ‹n≥1› unfolding Z_def by auto
also have "... ≤ Max {u i x|i. i< s} + (∑i<n. abs(u s ((T^^(i* s+t))x)))
+ birkhoff_sum F (n * s+t) x - (d/2) * card(V x ∩ {1..n * s+t})"
using * ** *** by auto
also have "... ≤ Z t n x" unfolding Z_def by (auto simp add: divide_simps)
finally show ?thesis by simp
next
assume B: "n>0 ∧ V x ∩ {(n-1) * s+t<..n * s+t} ≠ {}"
then have [simp]: "n>0" "n≥1" "n ≠ 0" by auto
obtain m where m: "m ∈ V x ∩ {(n-1) * s + t<..n * s + t}" using B by blast
then obtain l where l: "l ∈ {k..m-s}" "u m x - u (m-l) x ≤ - d * l" unfolding V_def by auto
then have "m-s>0" using ‹k>2* s› by auto
then have "m-l ≥ s" using l by auto
define p where "p = (m-l-t) div s"
have p1: "m-l ≥ p * s + t"
unfolding p_def using ‹m-l ≥ s› ‹s>t› minus_mod_eq_div_mult [symmetric, of "m - l - t" s]
by simp
have p2: "m-l < p* s + t + s"
unfolding p_def using ‹m-l ≥ s› ‹s>t›
div_mult_mod_eq[of "m-l-t" s] mod_less_divisor[OF ‹s>0›, of "m-l-t"] by linarith
then have "l ≥ m - p * s - t -s" by auto
then have "l ≥ (n-1) * s + t -p * s - t- s" using m by auto
then have "l + 2 * s≥ (n * s + t) - (p * s+t)" by (simp add: diff_mult_distrib)
have "(p+1) * s + t ≤ (n-1) * s + t"
using ‹k> 2 * s› m l(1) p1 by (auto simp add: algebra_simps)
then have "p+1 ≤ n-1"
using ‹s>0› by (meson add_le_cancel_right mult_le_cancel2)
then have "p ≤ n-1" "p<n" by auto
have "(p* s + t) + k ≤ (n * s + t)"
using m l(1) p1 by (auto simp add: algebra_simps)
then have "(1::real) ≤ ((n * s + t) - (p* s + t)) / k"
using ‹k > 2* s› by auto
have In: "u (n * s + t) x ≤ u m x + (∑i ∈ {(n-1) * s + t..<n * s + t}. abs(u 1 ((T^^i) x)))"
proof (cases "m = n * s + t")
case True
have "(∑i ∈ {(n-1) * s+t..<n * s+t}. abs(u 1 ((T^^i) x))) ≥ 0"
by (rule sum_nonneg, auto)
then show ?thesis using True by auto
next
case False
then have m2: "n * s + t - m >0" "(n-1) * s+t ≤ m" using m by auto
have "birkhoff_sum (u 1) (n * s+t-m) ((T^^m) x) = (∑i<n * s+t-m. u 1 ((T^^i)((T^^m) x)))"
unfolding birkhoff_sum_def by auto
also have "... = (∑i<n * s+t-m. u 1 ((T^^(i+m)) x))"
by (simp add: funpow_add)
also have "... = (∑j ∈ {m..<n * s+t}. u 1 ((T^^j) x))"
by (rule sum.reindex_bij_betw, rule bij_betw_byWitness[where ?f' = "λi. i - m"], auto)
also have "... ≤ (∑j ∈ {m..<n * s+t}. abs(u 1 ((T^^j) x)))"
by (rule sum_mono, auto)
also have "... ≤ (∑j ∈ {(n-1) * s+t..<m}. abs(u 1 ((T^^j) x))) + (∑j ∈ {m..<n * s+t}. abs(u 1 ((T^^j) x)))"
by auto
also have "... = (∑j ∈ {(n-1) * s+t..<n * s+t}. abs(u 1 ((T^^j) x)))"
apply (rule sum.atLeastLessThan_concat) using m2 by auto
finally have *: "birkhoff_sum (u 1) (n * s+t-m) ((T^^m) x) ≤ (∑j ∈ {(n-1) * s+t..<n * s+t}. abs(u 1 ((T^^j) x)))"
by auto
have "u (n * s+t) x ≤ u m x + u (n * s+t-m) ((T^^m) x)"
using subcocycle_ineq[OF subu, of m "n * s+t-m"] m2 by auto
also have "... ≤ u m x + birkhoff_sum (u 1) (n * s+t-m) ((T^^m) x)"
using subcocycle_bounded_by_birkhoff1[OF subu ‹n * s+t - m >0›, of "(T^^m)x"] by simp
finally show ?thesis using * by auto
qed
have Ip: "u (m-l) x ≤ u (p* s+t) x + (∑i ∈ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^i) x)))"
proof (cases "m-l = p* s+t")
case True
have "(∑i ∈ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^i) x))) ≥ 0"
by (rule sum_nonneg, auto)
then show ?thesis using True by auto
next
case False
then have "m-l - (p* s+t) > 0" using p1 by auto
have I: "p * s + t + (m - l - (p * s + t)) = m - l" using p1 by auto
have "birkhoff_sum (u 1) (m-l - (p* s+t)) ((T^^(p* s+t)) x) = (∑i<m-l - (p* s+t). u 1 ((T^^i) ((T^^(p* s+t)) x)))"
unfolding birkhoff_sum_def by auto
also have "... = (∑i<m-l - (p* s+t). u 1 ((T^^(i+p* s+t)) x))"
by (simp add: funpow_add)
also have "... = (∑j ∈ {p* s+t..<m-l}. u 1 ((T^^j) x))"
by (rule sum.reindex_bij_betw, rule bij_betw_byWitness[where ?f' = "λi. i - (p* s+t)"], auto)
also have "... ≤ (∑j ∈ {p* s+t..<m-l}. abs(u 1 ((T^^j) x)))"
by (rule sum_mono, auto)
also have "... ≤ (∑j ∈ {p* s+t..<m-l}. abs(u 1 ((T^^j) x))) + (∑j ∈ {m-l..<(p+1)* s+t}. abs(u 1 ((T^^j) x)))"
by auto
also have "... = (∑j ∈ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^j) x)))"
apply (rule sum.atLeastLessThan_concat) using p1 p2 by auto
finally have *: "birkhoff_sum (u 1) (m-l - (p* s+t)) ((T^^(p* s+t)) x)
≤ (∑j ∈ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^j) x)))"
by auto
have "u (m-l) x ≤ u (p* s+t) x + u (m-l - (p* s+t)) ((T^^(p* s+t)) x)"
using subcocycle_ineq[OF subu, of "p* s+t" "m-l - (p* s+t)" x] I by auto
also have "... ≤ u (p* s+t) x + birkhoff_sum (u 1) (m-l - (p* s+t)) ((T^^(p* s+t)) x)"
using subcocycle_bounded_by_birkhoff1[OF subu ‹m-l - (p* s+t) > 0›, of "(T^^(p* s+t)) x"] by simp
finally show ?thesis using * by auto
qed
have "(∑i ∈ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^i) x))) ≤ (∑i ∈ {p* s+t..<(p+1)* s+t}. K + F ((T^^i) x))"
apply (rule sum_mono) using u1_bound by auto
moreover have "(∑i ∈ {(n-1) * s+t..<n * s+t}. abs(u 1 ((T^^i) x))) ≤ (∑i ∈ {(n-1) * s+t..<n * s+t}. K + F ((T^^i) x))"
apply (rule sum_mono) using u1_bound by auto
ultimately have "(∑i ∈ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^i) x))) + (∑i ∈ {(n-1) * s+t..<n * s+t}. abs(u 1 ((T^^i) x)))
≤ (∑i ∈ {p* s+t..<(p+1)* s+t}. K + F ((T^^i) x)) + (∑i ∈ {(n-1) * s+t..<n * s+t}. K + F ((T^^i) x))"
by auto
also have "... = 2* K* s + (∑i ∈ {p* s+t..<(p+1)* s+t}. F ((T^^i) x)) + (∑i ∈{(n-1) * s+t..<n * s+t}. F ((T^^i) x))"
by (auto simp add: mult_eq_if sum.distrib)
also have "... ≤ 2* K * s + (∑i ∈ {p* s+t..<(n-1) * s+t}. F ((T^^i) x)) + (∑i ∈{(n-1) * s+t..<n * s+t}. F ((T^^i) x))"
apply (auto, rule sum_mono2) using ‹(p+1)* s+t≤(n-1) * s+t› F_pos by auto
also have "... = 2* K * s + (∑i ∈ {p* s+t..<n * s+t}. F ((T^^i) x))"
apply (auto, rule sum.atLeastLessThan_concat) using ‹p≤n-1› by auto
finally have A0: "(∑i ∈ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^i) x))) + (∑i ∈ {(n-1) * s+t..<n * s+t}. abs(u 1 ((T^^i) x)))
≤ 2* K * s + (∑i ∈ {p* s+t..<n * s+t}. F ((T^^i) x))"
by simp
have "card(V x ∩ {p * s + t<.. n * s+t}) ≤ card {p * s + t<.. n * s+t}" by (rule card_mono, auto)
have "2 * d * s + 2 * K * s > 0" using ‹K>0› ‹s>0› ‹d>0›
by (metis add_pos_pos mult_2 mult_zero_left of_nat_0_less_iff pos_divide_less_eq times_divide_eq_right)
then have "2 * d * s + 2 * K * s ≤ ((n * s + t) - (p* s + t)) * ((2 * d * s + 2 * K * s) / k)"
using ‹1 ≤ ((n * s + t) - (p* s + t)) / k› by (simp add: le_divide_eq_1 pos_le_divide_eq)
also have "... ≤ ((n * s + t) - (p* s + t)) * (d/2)"
apply (rule mult_left_mono) using ‹(2 * d * s + 2 * K * s)/k ≤ d/2› by auto
finally have "2 * d * s + 2 * K * s ≤ ((n * s + t) - (p* s + t)) * (d/2)"
by auto
then have "-d * ((n * s+t) - (p* s+t)) + 2 * d * s + 2 * K * s ≤ -d * ((n * s+t) - (p* s+t)) + ((n * s + t) - (p* s + t)) * (d/2)"
by linarith
also have "... = (-d/2) * card {p * s + t<.. n * s+t}"
by auto
also have "... ≤ (-d/2) * card(V x ∩ {p * s + t<.. n * s+t})"
using ‹card(V x ∩ {p * s + t<.. n * s+t}) ≤ card {p * s + t<.. n * s+t}› by auto
finally have A1: "-d * ((n * s+t) - (p* s+t)) + 2 * d * s + 2 * K * s ≤ (-d/2) * card(V x ∩ {p * s + t<.. n * s+t})"
by simp
have "V x ∩ {1.. n * s+t} = V x ∩ {1..p * s + t} ∪ V x ∩ {p * s + t<.. n * s+t}"
using ‹p * s + t + k ≤ n * s + t› by auto
then have "card (V x ∩ {1.. n * s+t}) = card(V x ∩ {1..p * s + t} ∪ V x ∩ {p * s + t<.. n * s+t})"
by auto
also have "... = card (V x ∩ {1..p * s + t}) + card (V x ∩ {p * s + t<.. n * s+t})"
by (rule card_Un_disjoint, auto)
finally have A2: "card (V x ∩ {1..p * s + t}) + card (V x ∩ {p * s + t<.. n * s+t}) = card (V x ∩ {1.. n * s+t})"
by simp
have A3: "(∑i<p. abs(u s ((T ^^ (i * s + t)) x))) ≤ (∑i<n. abs(u s ((T ^^ (i * s + t)) x)))"
apply (rule sum_mono2) using ‹p≤n-1› by auto
have A4: "birkhoff_sum F (p * s + t) x + (∑i ∈ {p* s+t..<n * s+t}. F ((T^^i) x)) = birkhoff_sum F (n * s + t) x"
unfolding birkhoff_sum_def apply (subst atLeast0LessThan[symmetric])+ apply (rule sum.atLeastLessThan_concat)
using ‹p≤n-1› by auto
have "u (n * s+t) x ≤ u m x + (∑i ∈ {(n-1) * s+t..<n * s+t}. abs(u 1 ((T^^i) x)))"
using In by simp
also have "... ≤ (u m x - u (m-l) x) + u (m-l) x + (∑i ∈ {(n-1) * s+t..<n * s+t}. abs(u 1 ((T^^i) x)))"
by simp
also have "... ≤ - d * l + u (p* s+t) x + (∑i ∈ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^i) x))) + (∑i ∈ {(n-1) * s+t..<n * s+t}. abs(u 1 ((T^^i) x)))"
using Ip l by auto
also have "... ≤ - d * ((n * s+t) - (p* s+t)) + 2*d* s + u (p* s+t) x + (∑i ∈ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^i) x))) + (∑i ∈ {(n-1) * s+t..<n * s+t}. abs(u 1 ((T^^i) x)))"
using ‹l + 2* s≥ (n * s+t) - (p* s+t)› apply (auto simp add: algebra_simps)
by (metis assms(1) distrib_left mult.commute mult_2 of_nat_add of_nat_le_iff mult_le_cancel_iff2)
also have "... ≤ -d * ((n * s+t) - (p* s+t)) + 2*d* s + Z t p x + 2* K * s + (∑i ∈ {p* s+t..<n * s+t}. F ((T^^i) x))"
using A0 H ‹p<n› by auto
also have "... ≤ Z t p x - d/2 * card(V x ∩ {p * s + t<.. n * s+t}) + (∑i ∈ {p* s+t..<n * s+t}. F ((T^^i) x))"
using A1 by auto
also have "... = Max {u i x |i. i < s} + (∑i<p. abs(u s ((T ^^ (i * s + t)) x))) + birkhoff_sum F (p * s + t) x
- d / 2 * card (V x ∩ {1..p * s + t}) - d/2 * card(V x ∩ {p * s + t<.. n * s+t}) + (∑i ∈ {p* s+t..<n * s+t}. F ((T^^i) x))"
unfolding Z_def by auto
also have "... ≤ Max {u i x |i. i < s} + (∑i<n. abs(u s ((T ^^ (i * s + t)) x)))
+ (birkhoff_sum F (p * s + t) x + (∑i ∈ {p* s+t..<n * s+t}. F ((T^^i) x)))
- d/2 * card (V x ∩ {1..p * s + t}) - d/2 * card(V x ∩ {p * s + t<.. n * s+t})"
using A3 by auto
also have "... = Z t n x"
unfolding Z_def using A2 A4 by (auto simp add: algebra_simps, metis distrib_left of_nat_add)
finally show ?thesis by simp
qed
qed
have Main2: "(d/2) * card(V x ∩ {1..n}) ≤ Max {u i x|i. i< s} + birkhoff_sum (λx. abs(u s x/ s)) (n+2* s) x
+ birkhoff_sum F (n + 2 * s) x + (1/s) * (∑i< 2 * s. abs(u (n+i) x))" for n x
proof -
define N where "N = (n div s) + 1"
then have "n ≤ N * s"
using ‹s > 0› dividend_less_div_times less_or_eq_imp_le by auto
have "N * s ≤ n + s"
by (auto simp add: N_def)
have eq_t: "(d/2) * card(V x ∩ {1..n}) ≤ abs(u(N* s+t) x) + (Max {u i x|i. i< s} + birkhoff_sum F (n + 2* s) x)
+ (∑i<N. abs(u s ((T^^(i * s+t))x)))"
if "t<s" for t
proof -
have *: "birkhoff_sum F (N * s+t) x ≤ birkhoff_sum F (n+ 2* s) x"
unfolding birkhoff_sum_def apply (rule sum_mono2) using F_pos ‹N * s ≤ n + s› ‹t<s› by auto
have "card(V x ∩ {1..n}) ≤ card(V x ∩ {1..N* s+t})"
apply (rule card_mono) using ‹n ≤ N * s› by auto
then have "(d/2) * card(V x ∩ {1..n}) ≤ (d/2) * card(V x ∩ {1..N* s+t})"
by auto
also have "... ≤ - u (N* s+t) x + Max {u i x|i. i< s} + (∑i<N. abs(u s ((T^^(i* s+t))x))) + birkhoff_sum F (N * s+t) x"
using Main[OF ‹t < s›, of N x] unfolding Z_def by auto
also have "... ≤ abs(u(N* s+t) x) + Max {u i x|i. i< s} + birkhoff_sum F (n + 2* s) x + (∑i<N. abs(u s ((T^^(i* s+t))x)))"
using * by auto
finally show ?thesis by simp
qed
have "(∑t<s. abs(u(N* s+t) x)) = (∑i∈{N* s..<N* s+s}. abs (u i x))"
by (rule sum.reindex_bij_betw, rule bij_betw_byWitness[where ?f' = "λi. i - N* s"], auto)
also have "... ≤ (∑i∈{n..<n + 2* s}. abs (u i x))"
apply (rule sum_mono2) using ‹n ≤ N * s› ‹N * s ≤ n + s› by auto
also have "... = (∑i<2* s. abs (u (n+i) x))"
by (rule sum.reindex_bij_betw[symmetric], rule bij_betw_byWitness[where ?f' = "λi. i - n"], auto)
finally have **: "(∑t<s. abs(u(N* s+t) x)) ≤ (∑i<2* s. abs (u (n+i) x))"
by simp
have "(∑t<s. (∑i<N. abs(u s ((T^^(i* s+t))x)))) = (∑i<N* s. abs(u s ((T^^i) x)))"
by (rule sum_arith_progression)
also have "... ≤ (∑i<n + 2* s. abs(u s ((T^^i) x)))"
apply (rule sum_mono2) using ‹N * s ≤ n + s› by auto
finally have ***: "(∑t<s. (∑i<N. abs(u s ((T^^(i* s+t))x)))) ≤ s * birkhoff_sum (λx. abs(u s x/ s)) (n+2* s) x"
unfolding birkhoff_sum_def using ‹s>0› by (auto simp add: sum_divide_distrib[symmetric])
have ****: "s * (∑i<n + 2* s. abs(u s ((T^^i) x)) /s) = (∑i<n + 2* s. abs(u s ((T^^i) x)))"
by (auto simp add: sum_divide_distrib[symmetric])
have "s * (d/2) * card(V x ∩ {1..n}) = (∑t<s. (d/2) * card(V x ∩ {1..n}))"
by auto
also have "... ≤ (∑t<s. abs(u(N* s+t) x) + (Max {u i x|i. i< s} + birkhoff_sum F (n + 2* s) x)
+ (∑i<N. abs(u s ((T^^(i* s+t))x))))"
apply (rule sum_mono) using eq_t by auto
also have "... = (∑t<s. abs(u(N* s+t) x)) + (∑t<s. Max {u i x|i. i< s} + birkhoff_sum F (n + 2* s) x) + (∑t<s. (∑i<N. abs(u s ((T^^(i* s+t))x))))"
by (auto simp add: sum.distrib)
also have "... ≤ (∑i<2* s. abs (u (n+i) x)) + s * (Max {u i x|i. i< s} + birkhoff_sum F (n + 2* s) x) + s * birkhoff_sum (λx. abs(u s x/ s)) (n+2* s) x"
using ** *** by auto
also have "... = s * ((1/s) * (∑i<2* s. abs (u (n+i) x)) + Max {u i x|i. i< s} + birkhoff_sum F (n + 2* s) x + birkhoff_sum (λx. abs(u s x/ s)) (n+2* s) x)"
by (auto simp add: divide_simps mult.commute distrib_left)
finally show ?thesis
by auto
qed
have densV: "upper_asymptotic_density (V x) ≤ (2/d) * real_cond_exp M Invariants F2 x" if "x ∈ G" for x
proof -
have *: "(λn. abs(u n x/n)) ⇢ 0"
apply (rule tendsto_rabs_zero) using ‹x∈G› unfolding G_def by auto
define Bound where "Bound = (λn. (Max {u i x|i. i< s}*(1/n) + birkhoff_sum (λx. abs(u s x/ s)) (n+2* s) x / n
+ birkhoff_sum F (n + 2* s) x / n + (1/s) * (∑i<2* s. abs(u (n+i) x) / n)))"
have "Bound ⇢ (Max {u i x|i. i< s} * 0 + real_cond_exp M Invariants (λx. abs(u s x/s)) x
+ real_cond_exp M Invariants F x + (1/s) * (∑i < 2 * s. 0))"
unfolding Bound_def apply (intro tendsto_intros)
using ‹x∈G› * unfolding G_def by auto
moreover have "real_cond_exp M Invariants (λx. abs(u s x/s)) x + real_cond_exp M Invariants F x = real_cond_exp M Invariants F2 x"
using ‹x ∈ G› unfolding G_def by auto
ultimately have B_conv: "Bound ⇢ real_cond_exp M Invariants F2 x" by simp
have *: "(d/2) * card(V x ∩ {1..n}) / n ≤ Bound n" for n
proof -
have "(d/2) * card(V x ∩ {1..n}) / n ≤ (Max {u i x|i. i< s} + birkhoff_sum (λx. abs(u s x/ s)) (n+2* s) x
+ birkhoff_sum F (n + 2* s) x + (1/s) * (∑i<2* s. abs(u (n+i) x)))/n"
using Main2[of x n] using divide_right_mono of_nat_0_le_iff by blast
also have "... = Bound n"
unfolding Bound_def by (auto simp add: add_divide_distrib sum_divide_distrib[symmetric])
finally show ?thesis by simp
qed
have "ereal(d/2 * upper_asymptotic_density (V x)) = ereal(d/2) * ereal(upper_asymptotic_density (V x))"
by auto
also have "... = ereal (d/2) * limsup(λn. card(V x ∩ {1..n}) / n)"
using upper_asymptotic_density_shift[of "V x" 1 0] by auto
also have "... = limsup(λn. ereal (d/2) * (card(V x ∩ {1..n}) / n))"
by (rule limsup_ereal_mult_left[symmetric], auto)
also have "... ≤ limsup Bound"
apply (rule Limsup_mono) using * not_eventuallyD by auto
also have "... = ereal(real_cond_exp M Invariants F2 x)"
using B_conv convergent_limsup_cl convergent_def convergent_real_imp_convergent_ereal limI by force
finally have "d/2 * upper_asymptotic_density (V x) ≤ real_cond_exp M Invariants F2 x"
by auto
then show ?thesis
by (simp add: divide_simps mult.commute)
qed
define epsilon where "epsilon = 2 * rho / d"
have [simp]: "epsilon > 0" "epsilon ≠ 0" "epsilon ≥ 0" unfolding epsilon_def by auto
have "emeasure M {x∈space M. real_cond_exp M Invariants F2 x ≥ epsilon} ≤ (1/epsilon) * (∫x. real_cond_exp M Invariants F2 x ∂M)"
apply (intro integral_Markov_inequality real_cond_exp_pos real_cond_exp_int(1))
by (auto simp add: int_F2 F2_pos)
also have "... = (1/epsilon) * (∫x. F2 x ∂M)"
apply (intro arg_cong[where f = ennreal])
by (simp, rule real_cond_exp_int(2), simp add: int_F2)
also have "... < (1/epsilon) * 2 * rho"
using F2_int by (intro ennreal_lessI) (auto simp add: divide_simps)
also have "... = d"
unfolding epsilon_def by auto
finally have *: "emeasure M {x∈space M. real_cond_exp M Invariants F2 x ≥ epsilon} < d"
by simp
define G2 where "G2 = {x ∈ G. real_cond_exp M Invariants F2 x < epsilon}"
have [measurable]: "G2 ∈ sets M" unfolding G2_def by simp
have "1 = emeasure M G"
using ‹emeasure M G = 1› by simp
also have "... ≤ emeasure M (G2 ∪ {x∈space M. real_cond_exp M Invariants F2 x ≥ epsilon})"
apply (rule emeasure_mono) unfolding G2_def using sets.sets_into_space[OF ‹G ∈ sets M›] by auto
also have "... ≤ emeasure M G2 + emeasure M {x∈space M. real_cond_exp M Invariants F2 x ≥ epsilon}"
by (rule emeasure_subadditive, auto)
also have "... < emeasure M G2 + d"
using * by auto
finally have "1 - d < emeasure M G2"
using emeasure_eq_measure ‹d ≤ 1› by (auto intro!: ennreal_less_iff[THEN iffD2] simp del: ennreal_plus simp add: ennreal_plus[symmetric])
have "upper_asymptotic_density {n. ∃l ∈ {k..n}. u n x - u (n-l) x ≤ - d * l} < d"
if "x ∈ G2" for x
proof -
have "x ∈ G" using ‹x ∈ G2› unfolding G2_def by auto
have "{n. ∃l ∈ {k..n}. u n x - u (n-l) x ≤ - d * l} ⊆ U x ∪ V x"
unfolding U_def V_def by fastforce
then have "upper_asymptotic_density {n. ∃l ∈ {k..n}. u n x - u (n-l) x ≤ - d * l} ≤ upper_asymptotic_density (U x ∪ V x)"
by (rule upper_asymptotic_density_subset)
also have "... ≤ upper_asymptotic_density (U x) + upper_asymptotic_density (V x)"
by (rule upper_asymptotic_density_union)
also have "... ≤ (2/d) * real_cond_exp M Invariants F2 x"
using densU[OF ‹x ∈ G›] densV[OF ‹x ∈ G›] by auto
also have "... < (2/d) * epsilon"
using ‹x ∈ G2› unfolding G2_def by (simp add: divide_simps)
text ‹This is where the choice of $\rho$ at the beginning of the proof is relevant:
we choose it so that the above term is at most $d$.›
also have "... = d" unfolding epsilon_def rho_def by auto
finally show ?thesis by simp
qed
then have "G2 ⊆ {x ∈ space M. upper_asymptotic_density {n. ∃l ∈ {k..n}. u n x - u (n-l) x ≤ - d * l} < d}"
using sets.sets_into_space[OF ‹G2 ∈ sets M›] by blast
then have "emeasure M G2 ≤ emeasure M {x ∈ space M. upper_asymptotic_density {n. ∃l ∈ {k..n}. u n x - u (n-l) x ≤ - d * l} < d}"
by (rule emeasure_mono, auto)
then have "emeasure M {x ∈ space M. upper_asymptotic_density {n. ∃l ∈ {k..n}. u n x - u (n-l) x ≤ - d * l} < d} > 1 -d"
using ‹emeasure M G2 > 1 - d› by auto
then show ?thesis by blast
qed
text ‹The two previous lemmas are put together in the following lemma,
corresponding to Lemma 2.3 in~\cite{gouezel_karlsson}.›
(* Combines the two previous density lemmas into a single statement: there is a
   positive sequence delta tending to 0 such that, on a set of measure > 1 - d,
   the times n at which the cocycle drops by - delta l * l over some window l
   have density at most d.  Strategy: use the "all times" lemma at scale d2/2
   for small windows, and the "large k" lemma at a geometric sequence of scales
   eps N for larger windows, then glue the resulting good sets together. *)
lemma upper_density_delta:
fixes d::real
assumes "d > 0" "d ≤ 1"
shows "∃delta::nat⇒real. (∀l. delta l > 0) ∧ (delta ⇢ 0) ∧
emeasure M {x ∈ space M. ∀(N::nat). card {n ∈{..<N}. ∃l ∈ {1..n}. u n x - u (n-l) x ≤ - delta l * l} ≤ d * N} > 1 - d"
proof -
(* Work with d2 = d/2 throughout; the final bad-time density bound d2 ≤ d
   leaves room for the two contributions (small and large windows). *)
define d2 where "d2 = d/2"
have [simp]: "d2 > 0" unfolding d2_def using assms by simp
then have "¬ d2 < 0" using not_less [of d2 0] by (simp add: less_le)
have "d2/2 > 0" by simp
(* c0: slope given by the "all times" lemma, valid at density level d2/2. *)
obtain c0 where c0: "c0> (0::real)" "emeasure M {x ∈ space M. upper_asymptotic_density {n. ∃l ∈ {1..n}. u n x - u (n-l) x ≤ - c0 * l} < d2/2} > 1 - (d2/2)"
using upper_density_all_times[OF ‹d2/2 > 0›] by blast
(* Turn the asymptotic-density statement into a uniform cardinality bound
   holding for all B beyond some threshold N1, on a set O1 of large measure. *)
have "∃N. emeasure M {x ∈ space M. ∀n ≥ N. card ({n. ∃l ∈ {1..n}. u n x - u (n-l) x ≤ - c0 * l} ∩ {..<n}) < (d2/2) * n} > 1 - (d2/2)"
apply (rule upper_density_eventually_measure) using c0(2) by auto
then obtain N1 where N1: "emeasure M {x ∈ space M. ∀B ≥ N1. card ({n. ∃l ∈ {1..n}. u n x - u (n-l) x ≤ - c0 * l} ∩ {..<B}) < (d2/2) * B} > 1 - (d2/2)"
by blast
define O1 where "O1 = {x ∈ space M. ∀B ≥ N1. card ({n. ∃l ∈ {1..n}. u n x - u (n-l) x ≤ - c0 * l} ∩ {..<B}) < (d2/2) * B}"
have [measurable]: "O1 ∈ sets M" unfolding O1_def by auto
have "emeasure M O1 > 1 - (d2/2)" unfolding O1_def by auto
have "emeasure M O1 > 1 - (d2/2)" unfolding O1_def using N1 by auto
(* Generic step for any level e ∈ (0,1]: from the "large k" lemma, produce a
   single threshold N that serves both as the minimal window size and as the
   starting point of the uniform cardinality bound.  (Replacing the window
   lower bound k by the larger N only shrinks the bad set.) *)
have *: "∃N. emeasure M {x ∈ space M. ∀B ≥ N. card({n. ∃l ∈ {N..n}. u n x - u (n-l) x ≤ - e * l} ∩ {..<B}) < e * B} > 1 - e"
if "e>0" "e ≤ 1" for e::real
proof -
obtain k where k: "emeasure M {x ∈ space M. upper_asymptotic_density {n. ∃l ∈ {k..n}. u n x - u (n-l) x ≤ - e * l} < e} > 1 - e"
using upper_density_large_k[OF ‹e>0› ‹e ≤ 1›] by blast
then obtain N0 where N0: "emeasure M {x ∈ space M. ∀B ≥ N0. card({n. ∃l ∈ {k..n}. u n x - u (n-l) x ≤ - e * l} ∩ {..<B}) < e * B} > 1 - e"
using upper_density_eventually_measure[OF _ k] by auto
define N where "N = max k N0"
have "emeasure M {x ∈ space M. ∀B ≥ N0. card({n. ∃l ∈ {k..n}. u n x - u (n-l) x ≤ - e * l} ∩ {..<B}) < e * B}
≤ emeasure M {x ∈ space M. ∀B ≥ N. card({n. ∃l ∈ {N..n}. u n x - u (n-l) x ≤ - e * l} ∩ {..<B}) < e * B}"
proof (rule emeasure_mono, auto)
fix x B assume H: "x ∈ space M" "∀B≥N0. card ({n. ∃l∈{k..n}. u n x - u (n - l) x ≤ - (e * real l)} ∩ {..<B}) < e * B" "N ≤ B"
have "card({n. ∃l ∈ {N..n}. u n x - u (n-l) x ≤ - (e * real l)} ∩ {..<B}) ≤ card({n. ∃l ∈ {k..n}. u n x - u (n-l) x ≤ -(e * real l)} ∩ {..<B})"
unfolding N_def by (rule card_mono, auto)
then have "real(card({n. ∃l ∈ {N..n}. u n x - u (n-l) x ≤ - (e * real l)} ∩ {..<B})) ≤ card({n. ∃l ∈ {k..n}. u n x - u (n-l) x ≤ -(e * real l)} ∩ {..<B})"
by simp
also have "... < e * B" using H(2) ‹B≥N› unfolding N_def by auto
finally show "card ({n. ∃l∈{N..n}. u n x - u (n - l) x ≤ - (e * real l)} ∩ {..<B}) < e * B"
by auto
qed
then have "emeasure M {x ∈ space M. ∀B ≥ N. card({n. ∃l ∈ {N..n}. u n x - u (n-l) x ≤ - e * l} ∩ {..<B}) < e * B} > 1 - e"
using N0 by simp
then show ?thesis by auto
qed
(* Ne e: a definite choice (via Hilbert choice) of such a threshold for each e. *)
define Ne where "Ne = (λ(e::real). SOME N. emeasure M {x ∈ space M. ∀B ≥ N. card({n. ∃l ∈ {N..n}. u n x - u (n-l) x ≤ - e * l} ∩ {..<B}) < e * B} > 1 - e)"
have Ne: "emeasure M {x ∈ space M. ∀B ≥ Ne e. card({n. ∃l ∈ {Ne e..n}. u n x - u (n-l) x ≤ - e * l} ∩ {..<B}) < e * B} > 1 - e"
if "e>0" "e ≤ 1" for e::real
unfolding Ne_def by (rule someI_ex[OF *[OF that]])
(* Geometric sequence of levels eps N = d2 / 2^N; it is summable with sum d2,
   which is what makes the intersection of the good sets large. *)
define eps where "eps = (λ(n::nat). d2 * (1/2)^n)"
have [simp]: "eps n > 0" for n unfolding eps_def by auto
then have [simp]: "eps n ≥ 0" for n by (rule less_imp_le)
have "eps n ≤ (1 / 2) * 1" for n
unfolding eps_def d2_def
using ‹d ≤ 1› by (intro mult_mono power_le_one) auto
also have "… < 1" by auto
finally have [simp]: "eps n < 1" for n by simp
then have [simp]: "eps n ≤ 1" for n by (rule less_imp_le)
have "(λn. d2 * (1/2)^n) ⇢ d2 * 0"
by (rule tendsto_mult, auto simp add: LIMSEQ_realpow_zero)
then have "eps ⇢ 0" unfolding eps_def by auto
(* Nf: strictly increasing sequence of time thresholds; Nf N dominates the
   thresholds Ne (eps n) for all n ≤ N, and Nf 1 = N1 + 1 covers O1. *)
define Nf where "Nf = (λN. (if (N = 0) then 0
else if (N = 1) then N1 + 1
else max (N1+1) (Max {Ne(eps n)|n. n ≤ N}) + N))"
have "Nf N < Nf (N+1)" for N
proof -
consider "N = 0" | "N = 1" | "N>1" by fastforce
then show ?thesis
proof (cases)
assume "N>1"
have "Max {Ne (eps n) |n. n ≤ N} ≤ Max {Ne (eps n) |n. n ≤ Suc N}"
by (rule Max_mono, auto)
then show ?thesis unfolding Nf_def by auto
qed (auto simp add: Nf_def)
qed
then have "strict_mono Nf"
using strict_mono_Suc_iff by auto
(* On N: the good set at scale N (O1 for N = 1, the eps-N good set otherwise);
   each has measure > 1 - eps N. *)
define On where "On = (λ(N::nat).
(if (N = 1) then O1
else {x ∈ space M. ∀B ≥ Nf N. card({n. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N) * l} ∩ {..<B}) < (eps N) * B}))"
have [measurable]: "On N ∈ sets M" for N unfolding On_def by auto
have "emeasure M (On N) > 1 - eps N" if "N>0" for N
proof -
consider "N = 1" | "N>1" using ‹N>0› by linarith
then show ?thesis
proof (cases)
case 1
then show ?thesis unfolding On_def eps_def using ‹emeasure M O1 > 1 - (d2/2)› by auto
next
case 2
have "Ne (eps N) ≤ Max {Ne(eps n)|n. n ≤ N}"
by (rule Max.coboundedI, auto)
also have "... ≤ Nf N" unfolding Nf_def using ‹N>1› by auto
finally have "Ne (eps N) ≤ Nf N" by simp
(* Raising both the window lower bound and the starting time from
   Ne (eps N) to the larger Nf N only weakens the condition. *)
have "1 - eps N < emeasure M {x ∈ space M. ∀B ≥ Ne(eps N). card({n. ∃l ∈ {Ne(eps N)..n}. u n x - u (n-l) x ≤ - (eps N) * l} ∩ {..<B}) < (eps N) * B}"
by (rule Ne) simp_all
also have "... ≤ emeasure M {x ∈ space M. ∀B ≥ Nf N. card({n. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N) * l} ∩ {..<B}) < (eps N) * B}"
proof (rule emeasure_mono, auto)
fix x n assume H: "x ∈ space M"
"∀n≥Ne (eps N). card ({n. ∃l∈{Ne (eps N)..n}. u n x - u (n - l) x ≤ - (eps N * l)} ∩ {..<n}) < eps N * n"
"Nf N ≤ n"
have "card({n. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<n}) ≤ card({n. ∃l ∈ {Ne(eps N)..n}. u n x - u (n-l) x ≤ -(eps N) * l} ∩ {..<n})"
apply (rule card_mono) using ‹Ne (eps N) ≤ Nf N› by auto
then have "real(card({n. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<n})) ≤ card({n. ∃l ∈ {Ne(eps N)..n}. u n x - u (n-l) x ≤ -(eps N) * l} ∩ {..<n})"
by simp
also have "... < (eps N) * n" using H(2) ‹n ≥ Nf N› ‹Ne (eps N) ≤ Nf N› by auto
finally show "real (card ({n. ∃l∈{Nf N..n}. u n x - u (n - l) x ≤ - (eps N * l)} ∩ {..<n})) < eps N * real n"
by auto
qed
also have "... = emeasure M (On N)"
unfolding On_def using ‹N>1› by auto
finally show ?thesis by simp
qed
qed
then have *: "emeasure M (On (N+1)) > 1 - eps (N+1)" for N by simp
(* Intersect all scales: summability of eps gives measure ≥ 1 - d2 > 1 - d. *)
define Ogood where "Ogood = (⋂N. On (N+1))"
have [measurable]: "Ogood ∈ sets M" unfolding Ogood_def by auto
have "emeasure M Ogood ≥ 1 - (∑N. eps(N+1))"
unfolding Ogood_def
apply (intro emeasure_intersection, auto)
using * by (auto simp add: eps_def summable_mult summable_divide summable_geometric less_imp_le)
moreover have "(∑N. eps(N+1)) = d2"
unfolding eps_def apply (subst suminf_mult)
using sums_unique[OF power_half_series, symmetric] by (auto intro!: summable_divide summable_geometric)
finally have "emeasure M Ogood ≥ 1 - d2" by simp
then have "emeasure M Ogood > 1 - d" unfolding d2_def using ‹d>0› ‹d ≤ 1›
by (simp add: emeasure_eq_measure field_sum_of_halves ennreal_less_iff)
(* Every x also satisfies a uniform lower bound - K * l on the finitely many
   pairs (n, l) with n ≤ Nf 1, for some integer K; hence Ogood is exhausted by
   the increasing family below.  (NB: the local M shadows the measure here.) *)
have Ogood_union: "Ogood = (⋃(K::nat). Ogood ∩ {x ∈ space M. ∀n ∈ {1..Nf 1}. ∀l ∈ {1..n}. u n x - u (n-l) x > - (real K * l)})"
apply auto using sets.sets_into_space[OF ‹Ogood ∈ sets M›] apply blast
proof -
fix x
define M where "M = Max {abs(u n x - u (n-l) x)/l | n l. n ∈ {1..Nf 1} ∧ l ∈ {1..n}}"
obtain N::nat where "N > M" using reals_Archimedean2 by blast
have "finite { (n, l) | n l. n ∈ {1..Nf 1} ∧ l ∈ {1..n}}"
by (rule finite_subset[where ?B = "{1.. Nf 1} × {1..Nf 1}"], auto)
moreover have "{abs(u n x - u (n-l) x)/l | n l. n ∈ {1..Nf 1} ∧ l ∈ {1..n}}
= (λ (n, l). abs(u n x - u (n-l) x)/l)` { (n, l) | n l. n ∈ {1..Nf 1} ∧ l ∈ {1..n}}"
by auto
ultimately have fin: "finite {abs(u n x - u (n-l) x)/l | n l. n ∈ {1..Nf 1} ∧ l ∈ {1..n}}"
by auto
{
fix n l assume nl: "n ∈ {1..Nf 1} ∧ l ∈ {1..n}"
then have "real l>0" by simp
have "abs(u n x - u (n-l) x)/l ≤ M"
unfolding M_def apply (rule Max_ge) using fin nl by auto
then have "abs(u n x - u (n-l) x)/l < real N" using ‹N>M› by simp
then have "abs(u n x - u (n-l) x)< real N * l" using ‹0 < real l› pos_divide_less_eq by blast
then have "u n x - u (n-l) x > - (real N * l)" by simp
}
then have "∀n∈{Suc 0..Nf (Suc 0)}. ∀l∈{Suc 0..n}. - (real N * real l) < u n x - u (n - l) x"
by auto
then show "∃N. ∀n∈{Suc 0..Nf (Suc 0)}. ∀l∈{Suc 0..n}. - (real N * real l) < u n x - u (n - l) x"
by auto
qed
(* Continuity of measure along the increasing union gives a single K that is
   large enough (≥ max c0 d2) while keeping measure > 1 - d. *)
have "(λK. emeasure M (Ogood ∩ {x ∈ space M. ∀n ∈ {1..Nf 1}. ∀l ∈ {1..n}. u n x - u (n-l) x > - (real K * l)}))
⇢ emeasure M (⋃(K::nat). Ogood ∩ {x ∈ space M. ∀n ∈ {1..Nf 1}. ∀l ∈ {1..n}. u n x - u (n-l) x > - (real K * l)})"
apply (rule Lim_emeasure_incseq, auto)
unfolding incseq_def apply auto
proof -
fix m n x na l
assume "m ≤ (n::nat)" "∀n∈{Suc 0..Nf (Suc 0)}. ∀l∈{Suc 0..n}. - (real m * real l) < u n x - u (n - l) x"
"Suc 0 ≤ l" "l ≤ na" "na ≤ Nf (Suc 0)"
then have "- (real m * real l) < u na x - u (na - l) x" by auto
moreover have "- (real n * real l) ≤ - (real m * real l)" using ‹m ≤ n› by (simp add: mult_mono)
ultimately show "- (real n * real l) < u na x - u (na - l) x" by auto
qed
moreover have "emeasure M (⋃(K::nat). Ogood ∩ {x ∈ space M. ∀n ∈ {1..Nf 1}. ∀l ∈ {1..n}. u n x - u (n-l) x > - (real K * l)}) > 1 - d"
using Ogood_union ‹emeasure M Ogood > 1 - d› by auto
ultimately have a: "eventually (λK. emeasure M (Ogood ∩ {x ∈ space M. ∀n ∈ {1..Nf 1}. ∀l ∈ {1..n}. u n x - u (n-l) x > - (real K * l)}) > 1 - d) sequentially"
by (rule order_tendstoD(1))
have b: "eventually (λK. K ≥ max c0 d2) sequentially"
using eventually_at_top_linorder nat_ceiling_le_eq by blast
have "eventually (λK. K ≥ max c0 d2 ∧ emeasure M (Ogood ∩ {x ∈ space M. ∀n ∈ {1..Nf 1}. ∀l ∈ {1..n}. u n x - u (n-l) x > - (real K * l)}) > 1 - d) sequentially"
by (rule eventually_elim2[OF a b], auto)
then obtain K where K: "K≥max c0 d2" "emeasure M (Ogood ∩ {x ∈ space M. ∀n ∈ {1..Nf 1}. ∀l ∈ {1..n}. u n x - u (n-l) x > - (real K * l)}) > 1 - d"
using eventually_False_sequentially eventually_elim2 by blast
(* Og: the final good set, of measure > 1 - d. *)
define Og where "Og = Ogood ∩ {x ∈ space M. ∀n ∈ {1..Nf 1}. ∀l ∈ {1..n}. u n x - u (n-l) x > - (real K * l)}"
have [measurable]: "Og ∈ sets M" unfolding Og_def by simp
have "emeasure M Og > 1 - d" unfolding Og_def using K by simp
(* prev_N n: the largest scale N whose threshold Nf N has been reached by n;
   delta l interpolates between the crude bound K (small l) and eps (prev_N l). *)
have fin: "finite {N. Nf N ≤ n}" for n
using pseudo_inverse_finite_set[OF filterlim_subseq[OF ‹strict_mono Nf›]] by auto
define prev_N where "prev_N = (λn. Max {N. Nf N ≤ n})"
define delta where "delta = (λl. if (prev_N l ≤ 1) then K else eps (prev_N l))"
have "∀l. delta l > 0"
unfolding delta_def using ‹K≥max c0 d2› ‹c0>0› by auto
(* prev_N tends to infinity, so eventually delta l = eps (prev_N l) ⇢ 0. *)
have "LIM n sequentially. prev_N n :> at_top"
unfolding prev_N_def apply (rule tendsto_at_top_pseudo_inverse2)
using ‹strict_mono Nf› by (simp add: filterlim_subseq)
then have "eventually (λl. prev_N l > 1) sequentially"
by (simp add: filterlim_iff)
then have "eventually (λl. delta l = eps(prev_N l)) sequentially"
unfolding delta_def by (simp add: eventually_mono)
moreover have "(λl. eps(prev_N l)) ⇢ 0"
by (rule filterlim_compose[OF ‹eps ⇢ 0› ‹LIM n sequentially. prev_N n :> at_top›])
ultimately have "delta ⇢ 0" by (simp add: filterlim_cong)
have "delta n ≤ K" for n
proof -
have *: "d2 * (1 / 2) ^ prev_N n ≤ real K * 1"
apply (rule mult_mono') using ‹K ≥ max c0 d2› ‹d2>0› by (auto simp add: power_le_one less_imp_le)
then show ?thesis unfolding delta_def apply auto unfolding eps_def using * by auto
qed
(* bad_times x: times excluded by the scale-1 condition or by some scale N ≥ 2;
   on Og their density up to B is at most d2 (= d2/2 + sum of the eps N). *)
define bad_times where "bad_times = (λx. {n ∈ {Nf 1..}. ∃l∈{1..n}. u n x - u (n-l) x ≤ - (c0 * l)} ∪
(⋃N∈{2..}. {n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)}))"
have card_bad_times: "card (bad_times x ∩ {..<B}) ≤ d2 * B" if "x ∈ Og" for x B
proof -
(* Geometric-series bound: the scale-N contributions sum to at most d2/2 * B. *)
have "(∑N∈{..<B}. (1/(2::real))^N) ≤ (∑N. (1/2)^N)"
by (rule sum_le_suminf, auto simp add: summable_geometric)
also have "... = 2" using suminf_geometric[of "1/(2::real)"] by auto
finally have *: "(∑N∈{..<B}. (1/(2::real))^N) ≤ 2" by simp
have "(∑ N ∈ {2..<B}. eps N * B) ≤ (∑ N ∈ {2..<B+2}. eps N * B)"
by (rule sum_mono2, auto)
also have "... = (∑N∈{2..<B+2}. d2 * (1/2)^N * B)"
unfolding eps_def by auto
also have "... = (∑N∈{..<B}. d2 * (1/2)^(N+2) * B)"
by (rule sum.reindex_bij_betw[symmetric],rule bij_betw_byWitness[where ?f' = "λi. i-2"], auto)
also have "... = (∑N∈{..<B}. (d2 * (1/4) * B) * (1/2)^N)"
by (auto, metis (lifting) mult.commute mult.left_commute)
also have "... = (d2 * (1/4) * B) * (∑N∈{..<B}. (1/2)^N)"
by (rule sum_distrib_left[symmetric])
also have "... ≤ (d2 * (1/4) * B) * 2"
apply (rule mult_left_mono) using * ‹d2 > 0› apply auto
by (metis ‹0 < d2› mult_eq_0_iff mult_le_0_iff not_le of_nat_eq_0_iff of_nat_le_0_iff)
finally have I: "(∑ N ∈ {2..<B}. eps N * B) ≤ d2/2 * B"
by auto
have "x ∈ On 1" using ‹x ∈ Og› unfolding Og_def Ogood_def by auto
then have "x ∈ O1" unfolding On_def by auto
(* Scale-1 contribution: at most (d2/2) * B (vacuous for B < Nf 1). *)
have B1: "real(card({n ∈ {Nf 1..}. ∃l∈{1..n}. u n x - u (n-l) x ≤ - (c0 * l)} ∩ {..<B})) ≤ (d2/2) * B" for B
proof (cases "B ≥ N1")
case True
have "card({n ∈ {Nf 1..}. ∃l∈{1..n}. u n x - u (n-l) x ≤ - (c0 * l)} ∩ {..<B})
≤ card({n. ∃l∈{1..n}. u n x - u (n-l) x ≤ - (c0 * l)} ∩ {..<B})"
by (rule card_mono, auto)
also have "... ≤ (d2/2) * B"
using ‹x ∈ O1› unfolding O1_def using True by auto
finally show ?thesis by auto
next
case False
then have "B < Nf 1" unfolding Nf_def by auto
then have "{n ∈ {Nf 1..}. ∃l∈{1..n}. u n x - u (n-l) x ≤ - (c0 * l)} ∩ {..<B} = {}"
by auto
then have "card ({n ∈ {Nf 1..}. ∃l∈{1..n}. u n x - u (n-l) x ≤ - (c0 * l)} ∩ {..<B}) = 0"
by auto
also have "... ≤ (d2/2) * B"
using ‹¬ d2 < 0› by simp
finally show ?thesis by simp
qed
(* Scale-N contribution (N ≥ 2): at most eps N * B, by membership in On N. *)
have BN: "real(card ({n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B})) ≤ eps N * B" if "N ≥ 2" for N B
proof -
have "x ∈ On ((N-1) + 1)" using ‹x ∈ Og› unfolding Og_def Ogood_def by auto
then have "x ∈ On N" using ‹N≥2› by auto
show ?thesis
proof (cases "B ≥ Nf N")
case True
have "card ({n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B}) ≤
card ({n. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B})"
by (rule card_mono, auto)
also have "... ≤ eps N * B"
using ‹x ∈ On N› ‹N≥2› True unfolding On_def by auto
finally show ?thesis by simp
next
case False
then have "{n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B} = {}"
by auto
then have "card ({n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B}) = 0"
by auto
also have "... ≤ eps N * B"
by (metis ‹⋀n. 0 < eps n› le_less mult_eq_0_iff mult_pos_pos of_nat_0 of_nat_0_le_iff)
finally show ?thesis by simp
qed
qed
(* Scales N ≥ B contribute nothing below B, since Nf N ≥ N ≥ B. *)
{
fix N assume "N ≥ B"
have "Nf N ≥ B" using seq_suble[OF ‹strict_mono Nf›, of N] ‹N ≥ B› by simp
then have "{Nf N..} ∩ {..<B} = {}" by auto
then have "{n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B} = {}" by auto
}
then have *: "(⋃N∈{B..}. {n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B}) = {}"
by auto
have "{2..} ⊆ {2..<B} ∪ {B..}" by auto
then have "(⋃N∈{2..}. {n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B})
⊆ (⋃N∈{2..<B}. {n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B})
∪ (⋃N∈{B..}. {n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B})"
by auto
also have "... = (⋃N∈{2..<B}. {n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B})"
using * by auto
finally have *: "bad_times x ∩ {..<B} ⊆ {n ∈ {Nf 1..}. ∃l∈{1..n}. u n x - u (n-l) x ≤ - (c0 * l)} ∩ {..<B}
∪ (⋃N∈{2..<B}. {n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B})"
unfolding bad_times_def by auto
(* Put the pieces together: d2/2 * B + d2/2 * B = d2 * B. *)
have "card(bad_times x ∩ {..<B}) ≤ card({n ∈ {Nf 1..}. ∃l∈{1..n}. u n x - u (n-l) x ≤ - (c0 * l)} ∩ {..<B}
∪ (⋃N∈{2..<B}. {n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B}))"
by (rule card_mono[OF _ *], auto)
also have "... ≤ card({n ∈ {Nf 1..}. ∃l∈{1..n}. u n x - u (n-l) x ≤ - (c0 * l)} ∩ {..<B}) +
card (⋃N∈{2..<B}. {n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B})"
by (rule card_Un_le)
also have "... ≤ card({n ∈ {Nf 1..}. ∃l∈{1..n}. u n x - u (n-l) x ≤ - (c0 * l)} ∩ {..<B}) +
(∑ N∈{2..<B}. card ({n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B}))"
by (simp del: UN_simps, rule card_UN_le, auto)
finally have "real (card(bad_times x ∩ {..<B})) ≤
real(card({n ∈ {Nf 1..}. ∃l∈{1..n}. u n x - u (n-l) x ≤ - (c0 * l)} ∩ {..<B})
+ (∑ N∈{2..<B}. card ({n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B})))"
by (subst of_nat_le_iff, simp)
also have "... = real(card({n ∈ {Nf 1..}. ∃l∈{1..n}. u n x - u (n-l) x ≤ - (c0 * l)} ∩ {..<B}))
+ (∑ N∈{2..<B}. real(card ({n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B})))"
by auto
also have "... ≤ (d2/2 * B) + (∑ N∈{2..<B}. real(card ({n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)} ∩ {..<B})))"
using B1 by simp
also have "... ≤ (d2/2 * B) + (∑ N ∈ {2..<B}. eps N * B)"
apply (simp, rule sum_mono) using BN by auto
also have "... ≤ (d2/2 * B) + (d2/2*B)"
using I by auto
finally show ?thesis by simp
qed
(* Key pointwise estimate on Og: outside bad_times x, every window l satisfies
   the strict bound with slope delta l, by case analysis on prev_N l. *)
have ineq_on_Og: "u n x - u (n-l) x > - delta l * l" if "l ∈ {1..n}" "n ∉ bad_times x" "x ∈ Og" for n x l
proof -
consider "n < Nf 1" | "n ≥ Nf 1 ∧ prev_N l ≤ 1" | "n ≥ Nf 1 ∧ prev_N l ≥ 2" by linarith
then show ?thesis
proof (cases)
(* Case 1: n below the first threshold — use the uniform bound - K * l. *)
assume "n < Nf 1"
then have "{N. Nf N ≤ n} = {0}"
apply auto using ‹strict_mono Nf› unfolding strict_mono_def
apply (metis le_trans less_Suc0 less_imp_le_nat linorder_neqE_nat not_less)
unfolding Nf_def by auto
then have "prev_N n = 0" unfolding prev_N_def by auto
moreover have "prev_N l ≤ prev_N n"
unfolding prev_N_def apply (rule Max_mono) using ‹l ∈ {1..n}› fin apply auto
unfolding Nf_def by auto
ultimately have "prev_N l = 0" using ‹prev_N l ≤ prev_N n› by auto
then have "delta l = K" unfolding delta_def by auto
have "1 ∉ {N. Nf N ≤ n}" using fin[of n]
by (metis (full_types) Max_ge ‹prev_N n = 0› fin not_one_le_zero prev_N_def)
then have "n < Nf 1" by auto
moreover have "n ≥ 1" using ‹l ∈ {1..n}› by auto
ultimately have "n ∈ {1..Nf 1}" by auto
then have "u n x - u (n-l) x > - (real K * l)" using ‹x ∈ Og› unfolding Og_def using ‹l ∈ {1..n}› by auto
then show ?thesis using ‹delta l = K› by auto
next
(* Case 2: small window (prev_N l ≤ 1) — the c0-bound applies, and c0 ≤ K. *)
assume H: "n ≥ Nf 1 ∧ prev_N l ≤ 1"
then have "delta l = K" unfolding delta_def by auto
have "n ∉ {n ∈ {Nf 1..}. ∃l∈{1..n}. u n x - u (n-l) x ≤ - (c0 * l)}"
using ‹n ∉ bad_times x› unfolding bad_times_def by auto
then have "u n x - u (n-l) x > - (c0 * l)"
using H ‹l ∈ {1..n}› by force
moreover have "- (c0 * l) ≥ - (real K * l)" using K(1) by (simp add: mult_mono)
ultimately show ?thesis using ‹delta l = K› by auto
next
(* Case 3: large window — the scale-N bound with N = prev_N l applies. *)
assume H: "n ≥ Nf 1 ∧ prev_N l ≥ 2"
define N where "N = prev_N l"
have "N ≥ 2" unfolding N_def using H by auto
have "prev_N l ∈ {N. Nf N ≤ l}"
unfolding prev_N_def apply (rule Max_in, auto simp add: fin)
unfolding Nf_def by auto
then have "Nf N ≤ l" unfolding N_def by auto
then have "Nf N ≤ n" using ‹l ∈ {1..n}› by auto
have "n ∉ {n ∈ {Nf N..}. ∃l ∈ {Nf N..n}. u n x - u (n-l) x ≤ - (eps N * l)}"
using ‹n ∉ bad_times x› ‹N≥2› unfolding bad_times_def by auto
then have "u n x - u (n-l) x > - (eps N * l)"
using ‹Nf N ≤ n› ‹Nf N ≤ l› ‹l ∈ {1..n}› by force
moreover have "eps N = delta l" unfolding delta_def N_def using H by auto
ultimately show ?thesis by auto
qed
qed
(* Conclusion: on Og, the delta-bad times are contained in bad_times x, whose
   density is ≤ d2 ≤ d; transfer the measure bound from Og to the target set. *)
have "Og ⊆ {x ∈ space M. ∀(B::nat). card {n ∈{..<B}. ∃l ∈ {1..n}. u n x - u (n-l) x ≤ - delta l * l} ≤ d * B}"
proof (auto)
fix x assume "x ∈ Og"
then show "x ∈ space M" unfolding Og_def by auto
next
fix x B assume "x ∈ Og"
have *: "{n. n < B ∧ (∃l∈{Suc 0..n}. u n x - u (n - l) x ≤ - (delta l * real l))} ⊆ bad_times x ∩ {..<B}"
using ineq_on_Og ‹x∈Og› by force
have "card {n. n < B ∧ (∃l∈{Suc 0..n}. u n x - u (n - l) x ≤ - (delta l * real l))} ≤ card (bad_times x ∩ {..<B})"
apply (rule card_mono, simp) using * by auto
also have "... ≤ d2 * B" using card_bad_times ‹x ∈ Og› by auto
also have "... ≤ d * B" unfolding d2_def using ‹d>0› by auto
finally show "card {n. n < B ∧ (∃l∈{Suc 0..n}. u n x - u (n - l) x ≤ - (delta l * real l))} ≤ d * B"
by simp
qed
then have "emeasure M Og ≤ emeasure M {x ∈ space M. ∀(B::nat). card {n ∈{..<B}. ∃l ∈ {1..n}. u n x - u (n-l) x ≤ - delta l * l} ≤ d * B}"
by (rule emeasure_mono, auto)
then have "emeasure M {x ∈ space M. ∀(B::nat). card {n ∈{..<B}. ∃l ∈ {1..n}. u n x - u (n-l) x ≤ - delta l * l} ≤ d * B} > 1-d"
using ‹emeasure M Og > 1 - d› by auto
then show ?thesis using ‹delta ⇢ 0› ‹∀l. delta l > 0› by auto
qed
text ‹We go back to the natural time direction, by using the previous result for the inverse map
and the inverse subcocycle, and a change of variables argument. The price to pay is that the
estimates we get are weaker: we have a control on a set of upper asymptotic density close to $1$, while
having a set of lower asymptotic density close to $1$ as before would be stronger. This will
nevertheless be sufficient for our purposes below.›
lemma upper_density_good_direction_invertible:
assumes "invertible_qmpt"
"d>(0::real)" "d ≤ 1"
shows "∃delta::nat⇒real. (∀l. delta l > 0) ∧ (delta ⇢ 0) ∧
emeasure M {x ∈ space M. upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} ≥ 1-d} ≥ ennreal(1-d)"
proof -
(* Strategy: interpret the Gouezel_Karlsson_Kingman locale for the inverse map Tinv
   and the inverse subcocycle n x ↦ u n ((Tinv^^n) x), apply I.upper_density_delta
   there, and transport the estimate back to T by a change of variables, using that
   T^^n is measure preserving. *)
interpret I: Gouezel_Karlsson_Kingman M Tinv "(λn x. u n ((Tinv^^n) x))"
proof
show "Tinv ∈ quasi_measure_preserving M M"
using Tinv_qmpt[OF ‹invertible_qmpt›] unfolding qmpt_def qmpt_axioms_def by simp
show "Tinv ∈ measure_preserving M M"
using Tinv_mpt[OF ‹invertible_qmpt›] unfolding mpt_def mpt_axioms_def by simp
show "mpt.subcocycle M Tinv (λn x. u n ((Tinv ^^ n) x))"
using subcocycle_u_Tinv[OF subu ‹invertible_qmpt›] by simp
show "- ∞ < subcocycle_avg_ereal (λn x. u n ((Tinv ^^ n) x))"
using subcocycle_avg_ereal_Tinv[OF subu ‹invertible_qmpt›] subu_fin by simp
show "AE x in M. fmpt.subcocycle_lim M Tinv (λn x. u n ((Tinv ^^ n) x)) x = 0"
using subcocycle_lim_Tinv[OF subu ‹invertible_qmpt›] subu_0 by auto
qed
have bij: "bij T" using ‹invertible_qmpt› unfolding invertible_qmpt_def by simp
(* The auxiliary small parameter e = d²/2 is what we feed to the inverse-system
   lemma; the final bound m ≤ 2e/d = d then comes out at the end. *)
define e where "e = d * d / 2"
have "e>0" "e≤1" unfolding e_def using ‹d>0› ‹d ≤ 1›
by (auto, meson less_imp_le mult_left_le one_le_numeral order_trans)
obtain delta::"nat ⇒ real" where d: "⋀l. delta l > 0"
"delta ⇢ 0"
"emeasure M {x ∈ space M. ∀N.
card {n ∈ {..<N}. ∃l∈{1..n}. u n ((Tinv ^^ n) x) - u (n - l) ((Tinv ^^ (n - l)) x) ≤ - delta l * real l} ≤ e * real N}
> 1-e"
using I.upper_density_delta[OF ‹e>0› ‹e≤1›] by blast
(* S: the good set for the inverse system, of measure > 1 - e. *)
define S where "S = {x ∈ space M. ∀N.
card {n ∈ {..<N}. ∃l∈{1..n}. u n ((Tinv ^^ n) x) - u (n - l) ((Tinv ^^ (n - l)) x) ≤ - delta l * real l} ≤ e * real N}"
have [measurable]: "S ∈ sets M" unfolding S_def by auto
have "emeasure M S > 1 - e" unfolding S_def using d(3) by simp
(* Og n: points that are "good at time n" for the inverse dynamics;
   Pg n: the corresponding good set for the forward dynamics. *)
define Og where "Og = (λn. {x ∈ space M. ∀l∈{1..n}. u n ((Tinv ^^ n) x) - u (n - l) ((Tinv ^^ (n - l)) x) > - delta l * real l})"
have [measurable]: "Og n ∈ sets M" for n unfolding Og_def by auto
define Pg where "Pg = (λn. {x ∈ space M. ∀l∈{1..n}. u n x - u (n - l) ((T^^l) x) > - delta l * real l})"
have [measurable]: "Pg n ∈ sets M" for n unfolding Pg_def by auto
(* Bad i: points for which, from time i on, the density of good forward times
   stays below 1 - d. The increasing union of the Bad i covers the complement
   of the conclusion's set. *)
define Bad where "Bad = (λi::nat. {x ∈ space M. ∀N≥i. card {n ∈ {..<N}. x ∈ Pg n} ≤ (1-d) * real N})"
have [measurable]: "Bad i ∈ sets M" for i unfolding Bad_def by auto
then have "range Bad ⊆ sets M" by auto
have "incseq Bad"
unfolding Bad_def incseq_def by auto
have inc: "{x ∈ space M. upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} < 1-d}
⊆ (⋃i. Bad i)"
proof
fix x assume H: "x ∈ {x ∈ space M. upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} < 1-d}"
then have "x ∈ space M" by simp
define A where "A = {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l}"
have "upper_asymptotic_density A < 1-d" using H unfolding A_def by simp
then have "∃i. ∀N≥i. card (A ∩ {..<N}) ≤ (1-d)* real N"
using upper_asymptotic_densityD[of A "1-d"] by (metis (no_types, lifting) eventually_sequentially less_imp_le)
then obtain i where "card (A ∩ {..<N}) ≤ (1-d)* real N" if "N≥i" for N by blast
moreover have "A ∩ {..<N} = {n. n<N ∧ (∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l)}" for N
unfolding A_def by auto
ultimately have "x ∈ Bad i" unfolding Bad_def Pg_def using ‹x ∈ space M›
by auto
then show "x ∈ (⋃i. Bad i)" by blast
qed
(* Change of variables: (T^^n)⁻¹(Og n) ⊆ Pg n pointwise, and T^^n preserves the
   measure, so Og n is at most as large as Pg n. *)
have "emeasure M (Og n) ≤ emeasure M (Pg n)" for n
proof -
have *: "(T^^n)-`(Og n) ∩ space M ⊆ Pg n" for n
proof
fix x assume x: "x ∈ (T^^n)-`(Og n) ∩ space M"
define y where "y = (T^^n) x"
then have "y ∈ Og n" using x by auto
have "y ∈ space M" using sets.sets_into_space[OF ‹Og n ∈ sets M›] ‹y ∈ Og n› by auto
have "x = (Tinv^^n) y"
unfolding y_def Tinv_def using inv_fn_o_fn_is_id[OF bij, of n] by (metis comp_apply)
{
fix l assume "l ∈ {1..n}"
(* Key algebraic identity: (T^^l) x = (Tinv^^(n-l)) y, so the forward increment
   at x equals the inverse-system increment at y. *)
have "(T^^l) x = (T^^l) ((Tinv^^l) ((Tinv^^(n-l))y))"
apply (subst ‹x = (Tinv^^n) y›) using funpow_add[of l "n-l" Tinv] ‹l ∈ {1..n}› by fastforce
then have *: "(T^^l) x = (Tinv^^(n-l)) y"
unfolding Tinv_def using fn_o_inv_fn_is_id[OF bij] by (metis comp_apply)
then have "u n x - u (n-l) ((T^^l) x) = u n ((Tinv^^n) y) - u (n-l) ((Tinv^^(n-l)) y)"
using ‹x = (Tinv^^n) y› by auto
also have "... > - delta l * real l"
using ‹y ∈ Og n› ‹l ∈ {1..n}› unfolding Og_def by auto
finally have "u n x - u (n-l) ((T^^l) x) > - delta l * real l" by simp
}
then show "x ∈ Pg n"
unfolding Pg_def using x by auto
qed
have "emeasure M (Og n) = emeasure M ((T^^n)-`(Og n) ∩ space M)"
using T_vrestr_same_emeasure(2) unfolding vimage_restr_def by auto
also have "... ≤ emeasure M (Pg n)"
apply (rule emeasure_mono) using * by auto
finally show ?thesis by simp
qed
(* Quantitative step: for every N ≥ 1, bound emeasure M (Bad N) by d, by comparing
   the integral of card {n < N. x ∈ Og n} (large on S) with the integral of
   card {n < N. x ∈ Pg n} (small on Bad N by definition). *)
{
fix N::nat assume "N ≥ 1"
have I: "card {n∈{..<N}. x ∈ Og n} ≥ (1-e) * real N" if "x ∈ S" for x
proof -
have "x ∈ space M" using ‹x ∈ S› sets.sets_into_space[OF ‹S ∈ sets M›] by auto
have a: "real (card {n. n < N ∧ (∃l∈{Suc 0..n}. u n ((Tinv ^^ n) x) - u (n - l) ((Tinv ^^ (n - l)) x) ≤ - (delta l * real l))}) ≤ e * real N"
using ‹x ∈ S› unfolding S_def by auto
have *: "{n. n < N} = {n. n < N ∧ (∃l∈{Suc 0..n}. u n ((Tinv ^^ n) x) - u (n - l) ((Tinv ^^ (n - l)) x) ≤ - (delta l * real l))}
∪ {n. n < N ∧ x ∈ Og n}" unfolding Og_def using ‹x ∈ space M›
by (auto, meson atLeastAtMost_iff linorder_not_le)
have "N = card {n. n < N}" by auto
also have "... = card {n. n < N ∧ (∃l∈{Suc 0..n}. u n ((Tinv ^^ n) x) - u (n - l) ((Tinv ^^ (n - l)) x) ≤ - (delta l * real l))}
+ card {n. n < N ∧ x ∈ Og n}"
apply (subst *, rule card_Un_disjoint) unfolding Og_def by auto
ultimately have "real N ≤ e * real N + card {n. n < N ∧ x ∈ Og n}"
using a by auto
then show ?thesis
by (auto simp add: algebra_simps)
qed
define m where "m = measure M (Bad N)"
have "m ≥ 0" "1-m ≥ 0" unfolding m_def by auto
have *: "1-e ≤ emeasure M S" using ‹emeasure M S > 1 - e› by auto
(* Chain of inequalities: (1-e)²N ≤ ∫_S (1-e)N ≤ ∫ card{n<N. x ∈ Og n}
   ≤ ∫ card{n<N. x ∈ Pg n} ≤ (1-d)N·m + N·(1-m), all in ennreal. *)
have "ennreal((1-e) * ((1-e) * real N)) = ennreal(1-e) * ennreal((1-e) * real N)"
apply (rule ennreal_mult) using ‹e ≤ 1› by auto
also have "... ≤ emeasure M S * ennreal((1-e) * real N)"
using mult_right_mono[OF *] by simp
also have "... = (∫⇧+ x∈S. ((1-e) * real N) ∂M)"
by (metis ‹S ∈ events› mult.commute nn_integral_cmult_indicator)
also have "... ≤ (∫⇧+x ∈ S. ennreal(card {n∈{..<N}. x ∈ Og n}) ∂M)"
apply (rule nn_integral_mono) using I unfolding indicator_def by (simp)
also have "... ≤ (∫⇧+x ∈ space M. ennreal(card {n∈{..<N}. x ∈ Og n}) ∂M)"
by (rule nn_set_integral_set_mono, simp only: sets.sets_into_space[OF ‹S ∈ sets M›])
also have "... = (∫⇧+x. ennreal(card {n∈{..<N}. x ∈ Og n}) ∂M)"
by (rule nn_set_integral_space)
also have "... = (∫⇧+x. ennreal (∑n∈{..<N}. ((indicator (Og n) x)::nat)) ∂M)"
apply (rule nn_integral_cong) using sum_indicator_eq_card2[of "{..<N}" Og] by auto
also have "... = (∫⇧+x. (∑n∈{..<N}. indicator (Og n) x) ∂M)"
apply (rule nn_integral_cong, auto, simp only: sum_ennreal[symmetric])
by (metis ennreal_0 ennreal_eq_1 indicator_eq_1_iff indicator_simps(2) real_of_nat_indicator)
also have "... = (∑n ∈{..<N}. (∫⇧+x. (indicator (Og n) x) ∂M))"
by (rule nn_integral_sum, simp)
also have "... = (∑n ∈{..<N}. emeasure M (Og n))"
by simp
also have "... ≤ (∑n ∈{..<N}. emeasure M (Pg n))"
apply (rule sum_mono) using ‹⋀n. emeasure M (Og n) ≤ emeasure M (Pg n)› by simp
also have "... = (∑n ∈{..<N}. (∫⇧+x. (indicator (Pg n) x) ∂M))"
by simp
also have "... = (∫⇧+x. (∑n∈{..<N}. indicator (Pg n) x) ∂M)"
by (rule nn_integral_sum[symmetric], simp)
also have "... = (∫⇧+x. ennreal (∑n∈{..<N}. ((indicator (Pg n) x)::nat)) ∂M)"
apply (rule nn_integral_cong, auto, simp only: sum_ennreal[symmetric])
by (metis ennreal_0 ennreal_eq_1 indicator_eq_1_iff indicator_simps(2) real_of_nat_indicator)
also have "... = (∫⇧+x. ennreal(card {n∈{..<N}. x ∈ Pg n}) ∂M)"
apply (rule nn_integral_cong) using sum_indicator_eq_card2[of "{..<N}" Pg] by auto
also have "... = (∫⇧+x ∈ space M. ennreal(card {n∈{..<N}. x ∈ Pg n}) ∂M)"
by (rule nn_set_integral_space[symmetric])
also have "... = (∫⇧+x ∈ Bad N ∪ (space M - Bad N). ennreal(card {n∈{..<N}. x ∈ Pg n}) ∂M)"
apply (rule nn_integral_cong) unfolding indicator_def by auto
also have "... = (∫⇧+x ∈ Bad N. ennreal(card {n∈{..<N}. x ∈ Pg n}) ∂M)
+ (∫⇧+x ∈ space M - Bad N. ennreal(card {n∈{..<N}. x ∈ Pg n}) ∂M)"
by (rule nn_integral_disjoint_pair, auto)
also have "... ≤ (∫⇧+x ∈ Bad N. ennreal((1-d) * real N) ∂M) + (∫⇧+x ∈ space M - Bad N. ennreal(real N) ∂M)"
apply (rule add_mono)
apply (rule nn_integral_mono, simp add: Bad_def indicator_def, auto)
apply (rule nn_integral_mono, simp add: indicator_def, auto)
using card_Collect_less_nat[of N] card_mono[of "{n. n < N}"] by (simp add: Collect_mono_iff)
also have "... = ennreal((1-d) * real N) * emeasure M (Bad N) + ennreal(real N) * emeasure M (space M - Bad N)"
by (simp add: nn_integral_cmult_indicator)
also have "... = ennreal((1-d) * real N) * ennreal(m) + ennreal(real N) * ennreal(1-m)"
unfolding m_def by (simp add: emeasure_eq_measure prob_compl)
also have "... = ennreal((1-d) * real N * m + real N * (1-m))"
using ‹m ≥ 0› ‹1-m ≥ 0› ‹d ≤ 1› ennreal_plus ennreal_mult by auto
finally have "ennreal((1-e) * ((1-e) * real N)) ≤ ennreal((1-d) * real N * m + real N * (1-m))"
by simp
moreover have "(1-d) * real N * m + real N * (1-m) ≥ 0"
using ‹m ≥ 0› ‹1-m ≥ 0› ‹d ≤ 1› by auto
ultimately have "(1-e) * ((1-e) * real N) ≤ (1-d) * real N * m + real N * (1-m)"
using ennreal_le_iff by auto
(* Divide out N and solve the resulting linear inequality for m. *)
then have "0 ≤ (e * 2 - d * m - e * e) * real N"
by (auto simp add: algebra_simps)
then have "0 ≤ e * 2 - d * m - e * e"
using ‹N ≥ 1› by (simp add: zero_le_mult_iff)
also have "... ≤ e * 2 - d * m"
using ‹e > 0› by auto
finally have "m ≤ e * 2 / d"
using ‹d>0› by (auto simp add: algebra_simps divide_simps)
then have "m ≤ d"
unfolding e_def using ‹d>0› by (auto simp add: divide_simps)
then have "emeasure M (Bad N) ≤ d"
unfolding m_def by (simp add: emeasure_eq_measure ennreal_leI)
}
(* Since Bad is increasing and each Bad N has measure ≤ d, so does the union;
   the complement then carries the desired measure ≥ 1 - d. *)
then have "emeasure M (⋃i. Bad i) ≤ d"
using LIMSEQ_le_const2[OF Lim_emeasure_incseq[OF ‹range Bad ⊆ sets M› ‹incseq Bad›]] by auto
then have "emeasure M {x ∈ space M. upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} < 1-d} ≤ d"
using emeasure_mono[OF inc, of M] by auto
then have *: "measure M {x ∈ space M. upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} < 1-d} ≤ d"
using emeasure_eq_measure ‹d>0› by fastforce
have "{x ∈ space M. upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} ≥ 1-d}
= space M - {x ∈ space M. upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} < 1-d}"
by auto
then have "measure M {x ∈ space M. upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} ≥ 1-d}
= measure M (space M - {x ∈ space M. upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} < 1-d})"
by simp
also have "... = measure M (space M)
- measure M {x ∈ space M. upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} < 1-d}"
by (rule measure_Diff, auto)
also have "... ≥ 1 - d"
using * prob_space by linarith
finally have "emeasure M {x ∈ space M. upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} ≥ 1-d} ≥ 1 - d"
using emeasure_eq_measure by auto
then show ?thesis using d(1) d(2) by blast
qed
text ‹Now, we want to remove the invertibility assumption in the previous lemma. The idea is to
go to the natural extension of the system, use the result there, and project it back.
However, if the system is not defined on a Polish space, there is no reason why it should have
a natural extension, so we first have to project the original system onto a Polish space on which
the subcocycle is defined. This system is obtained by considering the joint distribution of the
subcocycle and all its iterates (this is indeed a Polish system, as a space of functions from
$\mathbb{N}^2$ to $\mathbb{R}$).›
lemma upper_density_good_direction:
assumes "d>(0::real)" "d ≤ 1"
shows "∃delta::nat⇒real. (∀l. delta l > 0) ∧ (delta ⇢ 0) ∧
emeasure M {x ∈ space M. upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} ≥ 1-d} ≥ ennreal(1-d)"
proof -
(* Step 1: project the system onto a Polish factor MJ carrying the joint
   distribution of the subcocycle along the orbit; projJ is the factor map
   and TJ the shift on the factor. *)
define U where "U = (λx. (λn. u n x))"
define projJ where "projJ = (λx. (λn. U ((T^^n)x)))"
define MJ where "MJ = (distr M borel (λx. (λn. U ((T^^n)x))))"
define TJ::"(nat ⇒ nat ⇒ real) ⇒ (nat ⇒ nat ⇒ real)" where "TJ = nat_left_shift"
have *: "mpt_factor projJ MJ TJ"
unfolding projJ_def MJ_def TJ_def apply (rule fmpt_factor_projection)
unfolding U_def by (rule measurable_coordinatewise_then_product, simp)
interpret J: polish_pmpt MJ TJ
unfolding projJ_def polish_pmpt_def apply (auto)
apply (rule pmpt_factor) using * apply simp
unfolding polish_pmpt_axioms_def MJ_def by auto
have [simp]: "projJ ∈ measure_preserving M MJ" using mpt_factorE(1)[OF *] by simp
then have [measurable]: "projJ ∈ measurable M MJ" by (simp add: measure_preservingE(1))
text ‹We define a subcocycle $uJ$ in the projection corresponding to the original
subcocycle $u$ above. (With the natural definition, it is only a subcocycle almost
everywhere.) We check that it shares most properties of $u$.›
define uJ::"nat ⇒ (nat ⇒ nat ⇒ real) ⇒ real" where "uJ = (λn x. x 0 n)"
have [measurable]: "uJ n ∈ borel_measurable borel" for n
unfolding uJ_def by (metis measurable_product_coordinates measurable_product_then_coordinatewise)
moreover have "measurable borel borel = measurable MJ borel"
apply (rule measurable_cong_sets) unfolding MJ_def by auto
ultimately have [measurable]: "uJ n ∈ borel_measurable MJ" for n by fast
(* uJ pulls back to u under projJ; its integrals, subadditivity (a.e.) and
   vanishing asymptotics all transfer from u. *)
have uJ_proj: "u n x = uJ n (projJ x)" for n x
unfolding uJ_def projJ_def U_def by auto
have uJ_int: "integrable MJ (uJ n)" for n
apply (rule measure_preserving_preserves_integral'(1)[OF ‹projJ ∈ measure_preserving M MJ›])
apply (subst uJ_proj[of n, symmetric]) using int_u[of n] by auto
have uJ_int2: "(∫x. uJ n x ∂MJ) = (∫x. u n x ∂M)" for n
unfolding uJ_proj
apply (rule measure_preserving_preserves_integral'(2)[OF ‹projJ ∈ measure_preserving M MJ›])
apply (subst uJ_proj[of n, symmetric]) using int_u[of n] by auto
have uJ_AE: "AE x in MJ. uJ (n+m) x ≤ uJ n x + uJ m ((TJ^^n) x)" for m n
proof -
have "AE x in M. uJ (n+m) (projJ x) ≤ uJ n (projJ x) + uJ m (projJ ((T^^n) x))"
unfolding uJ_proj[symmetric] using subcocycle_ineq[OF subu] by auto
moreover have "AE x in M. projJ ((T^^n) x) = (TJ^^n) (projJ x)"
using qmpt_factor_iterates[OF mpt_factor_is_qmpt_factor[OF *]] by auto
ultimately have *: "AE x in M. uJ (n+m) (projJ x) ≤ uJ n (projJ x) + uJ m ((TJ^^n) (projJ x))"
by auto
show ?thesis
apply (rule quasi_measure_preserving_AE'[OF measure_preserving_is_quasi_measure_preserving[OF ‹projJ ∈ measure_preserving M MJ›], OF *])
by auto
qed
have uJ_0: "AE x in MJ. (λn. uJ n x / n) ⇢ 0"
proof -
have "AE x in M. (λn. u n x / n) ⇢ subcocycle_lim u x"
by (rule kingman_theorem_nonergodic(1)[OF subu subu_fin])
moreover have "AE x in M. subcocycle_lim u x = 0"
using subu_0 by simp
ultimately have *: "AE x in M. (λn. uJ n (projJ x) / n) ⇢ 0"
unfolding uJ_proj by auto
show ?thesis
apply (rule quasi_measure_preserving_AE'[OF measure_preserving_is_quasi_measure_preserving[OF ‹projJ ∈ measure_preserving M MJ›], OF *])
by auto
qed
text ‹Then, we go to the natural extension of $TJ$, to have an invertible system.›
(* Step 2: pass to the natural extension (MI, TI) of the Polish factor. *)
define MI where "MI = J.natural_extension_measure"
define TI where "TI = J.natural_extension_map"
define projI where "projI = J.natural_extension_proj"
interpret I: pmpt MI TI unfolding MI_def TI_def by (rule J.natural_extension(1))
have "I.mpt_factor projI MJ TJ" unfolding projI_def
using I.mpt_factorE(1) J.natural_extension(3) MI_def TI_def by auto
then have [simp]: "projI ∈ measure_preserving MI MJ" using I.mpt_factorE(1) by auto
then have [measurable]: "projI ∈ measurable MI MJ" by (simp add: measure_preservingE(1))
have "I.invertible_qmpt"
using J.natural_extension(2) MI_def TI_def by auto
text ‹We define a natural subcocycle $uI$ there, and check its properties.›
define uI where uI_proj: "uI = (λn x. uJ n (projI x))"
have [measurable]: "uI n ∈ borel_measurable MI" for n unfolding uI_proj by auto
have uI_int: "integrable MI (uI n)" for n
unfolding uI_proj by (rule measure_preserving_preserves_integral(1)[OF ‹projI ∈ measure_preserving MI MJ› uJ_int])
have "(∫x. uJ n x ∂MJ) = (∫x. uI n x ∂MI)" for n
unfolding uI_proj by (rule measure_preserving_preserves_integral(2)[OF ‹projI ∈ measure_preserving MI MJ› uJ_int])
then have uI_int2: "(∫x. uI n x ∂MI) = (∫x. u n x ∂M)" for n
using uJ_int2 by simp
have uI_AE: "AE x in MI. uI (n+m) x ≤ uI n x + uI m (((TI)^^n) x)" for m n
proof -
have "AE x in MI. uJ (n+m) (projI x) ≤ uJ n (projI x) + uJ m (((TJ)^^n) (projI x))"
apply (rule quasi_measure_preserving_AE[OF measure_preserving_is_quasi_measure_preserving[OF ‹projI ∈ measure_preserving MI MJ›]])
using uJ_AE by auto
moreover have "AE x in MI. ((TJ)^^n) (projI x) = projI (((TI)^^n) x)"
using I.qmpt_factor_iterates[OF I.mpt_factor_is_qmpt_factor[OF ‹I.mpt_factor projI MJ TJ›]]
by auto
ultimately show ?thesis unfolding uI_proj by auto
qed
have uI_0: "AE x in MI. (λn. uI n x / n) ⇢ 0"
unfolding uI_proj
apply (rule quasi_measure_preserving_AE[OF measure_preserving_is_quasi_measure_preserving[OF ‹projI ∈ measure_preserving MI MJ›]])
using uJ_0 by simp
text ‹As $uI$ is only a subcocycle almost everywhere, we correct it to get a genuine subcocycle,
to which we will apply Lemma \verb+upper_density_good_direction_invertible+.›
obtain vI where H: "I.subcocycle vI" "AE x in MI. ∀n. vI n x = uI n x"
using I.subcocycle_AE[OF uI_AE uI_int] by blast
have [measurable]: "⋀n. vI n ∈ borel_measurable MI" "⋀n. integrable MI (vI n)"
using I.subcocycle_integrable[OF H(1)] by auto
have "(∫x. vI n x ∂MI) = (∫x. uI n x ∂MI)" for n
apply (rule integral_cong_AE) using H(2) by auto
then have "(∫x. vI n x ∂MI) = (∫x. u n x ∂M)" for n
using uI_int2 by simp
then have "I.subcocycle_avg_ereal vI = subcocycle_avg_ereal u"
unfolding I.subcocycle_avg_ereal_def subcocycle_avg_ereal_def by auto
then have vI_fin: "I.subcocycle_avg_ereal vI > -∞" using subu_fin by simp
have "AE x in MI. (λn. vI n x / n) ⇢ 0"
using uI_0 H(2) by auto
moreover have "AE x in MI. (λn. vI n x / n) ⇢ I.subcocycle_lim vI x"
by (rule I.kingman_theorem_nonergodic(1)[OF H(1) vI_fin])
ultimately have vI_0: "AE x in MI. I.subcocycle_lim vI x = 0"
using LIMSEQ_unique by auto
(* Step 3: apply the invertible case to (MI, TI, vI). *)
interpret GKK: Gouezel_Karlsson_Kingman MI TI vI
apply standard
using H(1) vI_fin vI_0 by auto
obtain delta where delta: "⋀l. delta l > 0" "delta ⇢ 0"
"emeasure MI {x ∈ space MI. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < vI n x - vI (n - l) ((TI ^^ l) x)} ≥ 1 - d } ≥ 1 - d"
using GKK.upper_density_good_direction_invertible[OF ‹I.invertible_qmpt› ‹d>0› ‹d≤1›] by blast
text ‹Then, we need to go back to the original system, showing that the estimates for $TI$ carry
over. First, we go to $TJ$.›
have BJ: "emeasure MJ {x ∈ space MJ. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} ≥ 1 - d } ≥ 1 - d"
proof -
have *: "AE x in MI. uJ n (projI x) = vI n x" for n
using uI_proj H(2) by auto
have **: "AE x in MI. ∀n. uJ n (projI x) = vI n x"
by (subst AE_all_countable, auto intro: *)
have "AE x in MI. ∀m n. uJ n (projI ((TI^^m) x)) = vI n ((TI^^m) x)"
by (rule I.T_AE_iterates[OF **])
then have "AE x in MI. (∀m n. uJ n (projI ((TI^^m) x)) = vI n ((TI^^m) x)) ∧ (∀n. projI ((TI^^n) x) = (TJ^^n) (projI x))"
using I.qmpt_factor_iterates[OF I.mpt_factor_is_qmpt_factor[OF ‹I.mpt_factor projI MJ TJ›]] by auto
(* Extract an explicit null set ZI outside of which the identifications hold
   simultaneously for all iterates. *)
then obtain ZI where ZI: "⋀x. x ∈ space MI - ZI ⟹ (∀m n. uJ n (projI ((TI^^m) x)) = vI n ((TI^^m) x)) ∧ (∀n. projI ((TI^^n) x) = (TJ^^n) (projI x))"
"ZI ∈ null_sets MI"
using AE_E3 by blast
have *: "uJ n (projI x) - uJ (n - l) ((TJ ^^ l) (projI x)) = vI n x - vI (n - l) ((TI ^^ l) x)" if "x ∈ space MI - ZI" for x n l
proof -
have "(TI^^0) x = x" "(TJ^^0) (projI x) = (projI x)" by auto
then show ?thesis using ZI(1)[OF that] by metis
qed
(* Up to the null set ZI, the preimage of the good set for TJ coincides with the
   good set for TI; as projI is measure preserving, the measures match. *)
have "projI-`{x ∈ space MJ. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} ≥ 1 - d} ∩ space MI - ZI
= {x ∈ space MI - ZI. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < uJ n (projI x) - uJ (n - l) ((TJ ^^ l) (projI x))} ≥ 1 - d}"
by (auto simp add: measurable_space[OF ‹projI ∈ measurable MI MJ›])
also have "... = {x ∈ space MI - ZI. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < vI n x - vI (n - l) ((TI ^^ l) x)} ≥ 1 - d}"
using * by auto
also have "... = {x ∈ space MI. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < vI n x - vI (n - l) ((TI ^^ l) x)} ≥ 1 - d} - ZI"
by auto
finally have *: "projI-`{x ∈ space MJ. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} ≥ 1 - d} ∩ space MI - ZI
= {x ∈ space MI. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < vI n x - vI (n - l) ((TI ^^ l) x)} ≥ 1 - d} - ZI"
by simp
have "emeasure MJ {x ∈ space MJ. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} ≥ 1 - d}
= emeasure MI (projI-`{x ∈ space MJ. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} ≥ 1 - d} ∩ space MI)"
by (rule measure_preservingE(2)[symmetric], auto)
also have "... = emeasure MI ((projI-`{x ∈ space MJ. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} ≥ 1 - d} ∩ space MI) - ZI)"
by (rule emeasure_Diff_null_set[OF ‹ZI ∈ null_sets MI›, symmetric], measurable)
also have "... = emeasure MI ({x ∈ space MI. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < vI n x - vI (n - l) ((TI ^^ l) x)} ≥ 1 - d} - ZI)"
using * by simp
also have "... = emeasure MI {x ∈ space MI. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < vI n x - vI (n - l) ((TI ^^ l) x)} ≥ 1 - d}"
by (rule emeasure_Diff_null_set[OF ‹ZI ∈ null_sets MI›], measurable)
also have "... ≥ 1-d"
using delta(3) by simp
finally show ?thesis by simp
qed
text ‹Then, we go back to $T$ with virtually the same argument.›
have "emeasure M {x ∈ space M. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < u n x - u (n - l) ((T ^^ l) x)} ≥ 1 - d } ≥ 1 - d"
proof -
obtain Z where Z: "⋀x. x ∈ space M - Z ⟹ (∀n. projJ ((T^^n) x) = (TJ^^n) (projJ x))"
"Z ∈ null_sets M"
using AE_E3[OF qmpt_factor_iterates[OF mpt_factor_is_qmpt_factor[OF ‹mpt_factor projJ MJ TJ›]]] by blast
have *: "uJ n (projJ x) - uJ (n - l) ((TJ ^^ l) (projJ x)) = u n x - u (n - l) ((T^^ l) x)" if "x ∈ space M - Z" for x n l
proof -
have "(T^^0) x = x" "(TJ^^0) (projJ x) = (projJ x)" by auto
then show ?thesis using Z(1)[OF that] uJ_proj by metis
qed
have "projJ-`{x ∈ space MJ. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} ≥ 1 - d} ∩ space M - Z
= {x ∈ space M - Z. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < uJ n (projJ x) - uJ (n - l) ((TJ ^^ l) (projJ x))} ≥ 1 - d}"
by (auto simp add: measurable_space[OF ‹projJ ∈ measurable M MJ›])
also have "... = {x ∈ space M - Z. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < u n x - u (n - l) ((T ^^ l) x)} ≥ 1 - d}"
using * by auto
also have "... = {x ∈ space M. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < u n x - u (n - l) ((T ^^ l) x)} ≥ 1 - d} - Z"
by auto
finally have *: "projJ-`{x ∈ space MJ. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} ≥ 1 - d} ∩ space M - Z
= {x ∈ space M. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < u n x - u (n - l) ((T ^^ l) x)} ≥ 1 - d} - Z"
by simp
have "emeasure MJ {x ∈ space MJ. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} ≥ 1 - d}
= emeasure M (projJ-`{x ∈ space MJ. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} ≥ 1 - d} ∩ space M)"
by (rule measure_preservingE(2)[symmetric], auto)
also have "... = emeasure M ((projJ-`{x ∈ space MJ. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} ≥ 1 - d} ∩ space M) - Z)"
by (rule emeasure_Diff_null_set[OF ‹Z ∈ null_sets M›, symmetric], measurable)
also have "... = emeasure M ({x ∈ space M. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < u n x - u (n - l) ((T ^^ l) x)} ≥ 1 - d} - Z)"
using * by simp
also have "... = emeasure M {x ∈ space M. upper_asymptotic_density {n. ∀l∈{1..n}. - delta l * real l < u n x - u (n - l) ((T ^^ l) x)} ≥ 1 - d}"
by (rule emeasure_Diff_null_set[OF ‹Z ∈ null_sets M›], measurable)
finally show ?thesis using BJ by simp
qed
then show ?thesis using delta(1) delta(2) by auto
qed
text ‹From the quantitative lemma above, we deduce the qualitative statement we are after,
still in the setting of the locale.›
lemma infinite_AE:
shows "AE x in M. ∃delta::nat⇒real. (∀l. delta l > 0) ∧ (delta ⇢ 0) ∧
(infinite {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l})"
proof -
(* Use choice to get, for every d ∈ (0,1], a sequence deltaf d witnessing the
   quantitative lemma; then let d = 1/(n+2) → 0 and take the union of the
   corresponding good sets, which has full measure. *)
have "∃deltaf::real ⇒ nat ⇒ real. ∀d. ((d > 0 ∧ d ≤ 1) ⟶ ((∀l. deltaf d l > 0) ∧ (deltaf d ⇢ 0) ∧
emeasure M {x ∈ space M. upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - (deltaf d l) * l} ≥ 1-d} ≥ ennreal(1-d)))"
apply (subst choice_iff'[symmetric]) using upper_density_good_direction by auto
then obtain deltaf::"real ⇒ nat ⇒ real" where H: "⋀d. d > 0 ∧ d ≤1 ⟹ (∀l. deltaf d l > 0) ∧ (deltaf d ⇢ 0) ∧
emeasure M {x ∈ space M. upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - (deltaf d l) * l} ≥ 1-d} ≥ ennreal(1-d)"
by blast
define U where "U = (λd. {x ∈ space M. upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - (deltaf d l) * l} ≥ 1-d})"
have [measurable]: "U d ∈ sets M" for d
unfolding U_def by auto
have *: "emeasure M (U d) ≥ 1 - d" if "d>0 ∧ d≤ 1" for d
unfolding U_def using H that by auto
define V where "V = (⋃n::nat. U (1/(n+2)))"
have [measurable]: "V ∈ sets M"
unfolding V_def by auto
(* V contains every U (1/(n+2)), hence has measure ≥ 1 - 1/(n+2) for all n,
   hence measure 1. *)
have a: "emeasure M V ≥ 1 - 1 / (n + 2)" for n::nat
proof -
have "1 - 1 / (n + 2) = 1 - 1 / (real n + 2)"
by auto
also have "... ≤ emeasure M (U (1/(real n+2)))"
using *[of "1 / (real n + 2)"] by auto
also have "... ≤ emeasure M V"
apply (rule Measure_Space.emeasure_mono) unfolding V_def by auto
finally show ?thesis by simp
qed
have b: "(λn::nat. 1 - 1 / (n + 2)) ⇢ ennreal(1 - 0)"
by (intro tendsto_intros LIMSEQ_ignore_initial_segment)
have "emeasure M V ≥ 1 - 0"
apply (rule Lim_bounded) using a b by auto
then have "emeasure M V = 1"
by (simp add: emeasure_ge_1_iff)
then have "AE x in M. x ∈ V"
by (simp add: emeasure_eq_measure prob_eq_1)
moreover
{
(* On V: a positive upper density set is in particular infinite, which is the
   qualitative statement we are after. *)
fix x assume "x ∈ V"
then obtain n::nat where "x ∈ U (1/(real n+2))" unfolding V_def by blast
define d where "d = 1/(real n + 2)"
have "0 < d" "d≤1" unfolding d_def by auto
have "0 < 1-d" unfolding d_def by auto
also have "... ≤ upper_asymptotic_density {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - (deltaf d l) * l}"
using ‹x ∈ U (1/(real n+2))› unfolding U_def d_def by auto
finally have "infinite {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - (deltaf d l) * l}"
using upper_asymptotic_density_finite by force
then have "∃delta::nat⇒real. (∀l. delta l > 0) ∧ (delta ⇢ 0) ∧
(infinite {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l})"
using H ‹0 < d› ‹d≤1› by auto
}
ultimately show ?thesis by auto
qed
end
text ‹Finally, we obtain the full statement, by reducing to the previous situation where the
asymptotic average vanishes.›
theorem (in pmpt) Gouezel_Karlsson_Kingman:
assumes "subcocycle u" "subcocycle_avg_ereal u > -∞"
shows "AE x in M. ∃delta::nat⇒real. (∀l. delta l > 0) ∧ (delta ⇢ 0) ∧
(infinite {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x > - delta l * l})"
proof -
(* Reduce to the case of a vanishing asymptotic limit: correct u by the Birkhoff
   sums of -subcocycle_lim u, so that w = u + v has subcocycle_lim w = 0 a.e.,
   and apply infinite_AE from the locale to w. *)
have [measurable]: "integrable M (u n)" "u n ∈ borel_measurable M" for n
using subcocycle_integrable[OF assms(1)] by auto
define v where "v = birkhoff_sum (λx. -subcocycle_lim u x)"
have int [measurable]: "integrable M (λx. -subcocycle_lim u x)"
using kingman_theorem_nonergodic(2)[OF assms] by auto
have "subcocycle v" unfolding v_def
apply (rule subcocycle_birkhoff)
using assms ‹integrable M (λx. -subcocycle_lim u x)› unfolding subcocycle_def by auto
have "subcocycle_avg_ereal v > - ∞"
unfolding v_def using subcocycle_avg_ereal_birkhoff[OF int] kingman_theorem_nonergodic(2)[OF assms] by auto
have "AE x in M. subcocycle_lim v x = real_cond_exp M Invariants (λx. -subcocycle_lim u x) x"
unfolding v_def by (rule subcocycle_lim_birkhoff[OF int])
moreover have "AE x in M. real_cond_exp M Invariants (λx. - subcocycle_lim u x) x = - subcocycle_lim u x"
by (rule real_cond_exp_F_meas[OF int], auto)
ultimately have AEv: "AE x in M. subcocycle_lim v x = - subcocycle_lim u x"
by auto
define w where "w = (λn x. u n x + v n x)"
have a: "subcocycle w"
unfolding w_def by (rule subcocycle_add[OF assms(1) ‹subcocycle v›])
have b: "subcocycle_avg_ereal w > -∞"
unfolding w_def by (rule subcocycle_avg_add(1)[OF assms(1) ‹subcocycle v› assms(2) ‹subcocycle_avg_ereal v > - ∞›])
have "AE x in M. subcocycle_lim w x = subcocycle_lim u x + subcocycle_lim v x"
unfolding w_def by (rule subcocycle_lim_add[OF assms(1) ‹subcocycle v› assms(2) ‹subcocycle_avg_ereal v > - ∞›])
then have c: "AE x in M. subcocycle_lim w x = 0"
using AEv by auto
interpret Gouezel_Karlsson_Kingman M T w
apply standard using a b c by auto
have "AE x in M. ∃delta::nat⇒real. (∀l. delta l > 0) ∧ (delta ⇢ 0) ∧
(infinite {n. ∀l ∈ {1..n}. w n x - w (n-l) ((T^^l) x) > - delta l * l})"
using infinite_AE by auto
moreover
{
(* Translate the conclusion for w back into one for u: since v is a Birkhoff sum
   of an invariant function, the increments of w differ from those of u exactly
   by l * subcocycle_lim u x. *)
fix x assume H: "∃delta::nat⇒real. (∀l. delta l > 0) ∧ (delta ⇢ 0) ∧
(infinite {n. ∀l ∈ {1..n}. w n x - w (n-l) ((T^^l) x) > - delta l * l})"
"x ∈ space M"
have *: "v n x = - n * subcocycle_lim u x" for n
unfolding v_def using birkhoff_sum_of_invariants[OF _ ‹x ∈ space M›] by auto
have **: "v n ((T^^l) x) = - n * subcocycle_lim u x" for n l
proof -
have "v n ((T^^l) x) = - n * subcocycle_lim u ((T^^l) x)"
unfolding v_def using birkhoff_sum_of_invariants[OF _ T_spaceM_stable(2)[OF ‹x ∈ space M›]] by auto
also have "... = - n * subcocycle_lim u x"
using Invariants_func_is_invariant_n[OF subcocycle_lim_meas_Inv(2) ‹x ∈ space M›] by auto
finally show ?thesis by simp
qed
have "w n x - w (n-l) ((T^^l) x) = u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x" if "l ∈ {1..n}" for n l
unfolding w_def using *[of n] **[of "n-l" l] that apply (auto simp add: algebra_simps)
by (metis comm_semiring_class.distrib diff_add_inverse nat_le_iff_add of_nat_add)
then have "∃delta::nat⇒real. (∀l. delta l > 0) ∧ (delta ⇢ 0) ∧
(infinite {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x > - delta l * l})"
using H(1) by auto
}
ultimately show ?thesis by auto
qed
text ‹The previous theorem only contains a lower bound. The corresponding upper bound follows
readily from Kingman's theorem. The next statement combines both upper and lower bounds.›
theorem (in pmpt) Gouezel_Karlsson_Kingman':
assumes "subcocycle u" "subcocycle_avg_ereal u > -∞"
shows "AE x in M. ∃delta::nat⇒real. (∀l. delta l > 0) ∧ (delta ⇢ 0) ∧
(infinite {n. ∀l ∈ {1..n}. abs(u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x) < delta l * l})"
proof -
(* Combine the lower bound from Gouezel_Karlsson_Kingman with the upper bound
   from subadditivity and Kingman's theorem: enlarging alpha by the error term
   |u l x / l - subcocycle_lim u x| (which tends to 0 a.e.) controls both sides. *)
{
fix x assume x: "∃delta::nat⇒real. (∀l. delta l > 0) ∧ (delta ⇢ 0) ∧
(infinite {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x > - delta l * l})"
"(λl. u l x/l) ⇢ subcocycle_lim u x"
then obtain alpha::"nat ⇒ real" where a: "⋀l. alpha l > 0" "alpha ⇢ 0"
"infinite {n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x > - alpha l * l}"
by auto
define delta::"nat ⇒ real" where "delta = (λl. alpha l + norm(u l x / l - subcocycle_lim u x))"
{
fix n assume *: "∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x > - alpha l * l"
have H: "x > -a ⟹ x < a ⟹ abs x < a" for a x::real by simp
have "abs(u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x) < delta l * l" if "l∈{1..n}" for l
proof (rule H)
(* Upper bound: subadditivity gives u n x ≤ u l x + u (n-l) ((T^^l) x). *)
have "u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x ≤ u l x - l * subcocycle_lim u x"
using assms(1) subcocycle_ineq[OF assms(1), of l "n-l" x] that by auto
also have "... ≤ l * norm(u l x/l - subcocycle_lim u x)"
using that by (auto simp add: algebra_simps divide_simps)
also have "... < delta l * l"
unfolding delta_def using a(1)[of l] that by auto
finally show "u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x < delta l * l" by simp
(* Lower bound: delta dominates alpha, so the hypothesis * applies. *)
have "- (delta l * l) ≤ -alpha l * l"
unfolding delta_def by (auto simp add: algebra_simps)
also have "... < u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x"
using * that by auto
finally show "u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x > -(delta l * l)"
by simp
qed
then have "∀l ∈ {1..n}. abs(u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x) < delta l * l"
by auto
}
then have "{n. ∀l ∈ {1..n}. u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x > - alpha l * l}
⊆ {n. ∀l ∈ {1..n}. abs(u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x) < delta l * l}"
by auto
then have "infinite {n. ∀l ∈ {1..n}. abs(u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x) < delta l * l}"
using a(3) finite_subset by blast
moreover have "delta ⇢ 0 + 0"
unfolding delta_def using x(2) by (intro tendsto_intros a(2) tendsto_norm_zero LIM_zero)
moreover have "delta l > 0" for l unfolding delta_def using a(1)[of l] by auto
ultimately have "∃delta::nat⇒real. (∀l. delta l > 0) ∧ (delta ⇢ 0) ∧
(infinite {n. ∀l ∈ {1..n}. abs(u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x) < delta l * l})"
by auto
}
then show ?thesis
using Gouezel_Karlsson_Kingman[OF assms] kingman_theorem_nonergodic(1)[OF assms] by auto
qed
end
Theory Kohlberg_Neyman_Karlsson
section ‹A theorem by Kohlberg and Neyman›
theory Kohlberg_Neyman_Karlsson
imports Fekete
begin
text ‹In this section, we prove a theorem due to Kohlberg and Neyman: given a semicontraction
$T$ of a euclidean space, then $T^n(0)/n$ converges when $n \to \infty$. The proof we give
is due to Karlsson. It mainly builds on subadditivity ideas. The geometry of the space
is essentially not relevant except at the very end of the argument, where strict convexity
comes into play.›
text ‹We recall Fekete's lemma: if a sequence is subadditive (i.e.,
$u_{n+m}\leq u_n + u_m$), then $u_n/n$ converges to its infimum. It is proved
in a different file, but we recall the statement for self-containedness.›
(* Fekete's subadditivity lemma, restated from the Fekete theory: a subadditive real
   sequence with u n / n bounded below satisfies u n / n ⇢ Inf {u n / n | n > 0}.
   The proof simply unfolds the subadditivity predicate and defers to
   subadditive_converges_bounded, proved elsewhere. *)
lemma fekete:
fixes u::"nat ⇒ real"
assumes "⋀n m. u (m+n) ≤ u m + u n"
"bdd_below {u n/n | n. n>0}"
shows "(λn. u n/n) ⇢ Inf {u n/n | n. n>0}"
apply (rule subadditive_converges_bounded) unfolding subadditive_def using assms by auto
text ‹A real sequence tending to infinity has infinitely many high-scores, i.e.,
there are infinitely many times where it is larger than all its previous values.›
(* A real sequence tending to +∞ has, beyond any given index i, a time n that is a
   "high score": u l ≤ u n for every l ≤ n.  Take M = max of the first i values, and
   n = first time the sequence exceeds M; then n ≥ i, and no earlier time can beat u n. *)
lemma high_scores:
fixes u::"nat ⇒ real" and i::nat
assumes "u ⇢ ∞"
shows "∃n ≥ i. ∀l ≤ n. u l ≤ u n"
proof -
(* M bounds all values before time i; n is the first time the sequence exceeds M. *)
define M where "M = Max {u l|l. l < i}"
define n where "n = Inf {m. u m > M}"
(* The exceedance set is nonempty since u ⇢ ∞, so n is a genuine member of it. *)
have "eventually (λm. u m > M) sequentially"
using assms by (simp add: filterlim_at_top_dense tendsto_PInfty_eq_at_top)
then have "{m. u m > M} ≠ {}" by fastforce
then have "n ∈ {m. u m > M}" unfolding n_def using Inf_nat_def1 by metis
then have "u n > M" by simp
(* n < i would force u n ≤ M, contradicting u n > M. *)
have "n ≥ i"
proof (rule ccontr)
assume " ¬ i ≤ n"
then have *: "n < i" by simp
have "u n ≤ M" unfolding M_def apply (rule Max_ge) using * by auto
then show False using ‹u n > M› by auto
qed
(* Any l < n precedes the first exceedance of M, hence u l ≤ M < u n. *)
moreover have "u l ≤ u n" if "l ≤ n" for l
proof (cases "l = n")
case True
then show ?thesis by simp
next
case False
then have "l < n" using ‹l ≤ n› by auto
then have "l ∉ {m. u m > M}"
unfolding n_def by (meson bdd_below_def cInf_lower not_le zero_le)
then show ?thesis using ‹u n > M› by auto
qed
ultimately show ?thesis by auto
qed
text ‹Hahn-Banach in euclidean spaces: given a vector $u$, there exists a unit norm
vector $v$ such that $\langle u, v \rangle = \|u\|$ (and we put a minus sign as we will
use it in this form). This uses the fact that, in Isabelle/HOL, euclidean spaces
have positive dimension by definition.›
(* Baby Hahn–Banach in a euclidean space: for any u there is a unit vector v with
   v ∙ u = -norm u.  For u = 0 any basis vector works (euclidean spaces have
   nonempty Basis by definition); otherwise v = -u / norm u does. *)
lemma select_unit_norm:
fixes u::"'a::euclidean_space"
shows "∃v. norm v = 1 ∧ v ∙ u = - norm u"
proof (cases "u = 0")
case True
then show ?thesis using norm_Basis nonempty_Basis by fastforce
next
case False
show ?thesis
(* The normalized opposite vector realizes the extremal inner product. *)
apply (rule exI[of _ "-u/⇩R norm u"])
using False by (auto simp add: dot_square_norm power2_eq_square)
qed
text ‹We set up the assumption that we will use until the end of this file,
in the following locale: we fix a semicontraction $T$ of a euclidean space.
Our goal will be to show that such a semicontraction has an asymptotic translation vector.›
(* Standing assumption for the rest of the theory: T is a semicontraction
   (1-Lipschitz map) of a euclidean space. *)
locale Kohlberg_Neyman_Karlsson =
fixes T::"'a::euclidean_space ⇒ 'a"
assumes semicontract: "dist (T x) (T y) ≤ dist x y"
begin
text ‹The iterates of $T$ are still semicontractions, by induction.›
(* Each iterate T^^n is again a semicontraction, by induction on n. *)
lemma semicontract_Tn:
"dist ((T^^n) x) ((T^^n) y) ≤ dist x y"
apply (induction n, auto) using semicontract order_trans by blast
text ‹The main quantity we will use is the distance from the origin to its image under $T^n$.
We denote it by $u_n$. The main point is that it is subadditive by semicontraction, hence
it converges to a limit $A$ given by $\inf \{u_n/n\}$, thanks to Fekete's lemma.›
(* u n = distance from the origin to its n-th image; subadditive by semicontraction. *)
definition u::"nat ⇒ real"
where "u n = dist 0 ((T^^n) 0)"
(* A = the limit of u n / n, expressed as the infimum per Fekete's lemma. *)
definition A::real
where "A = Inf {u n/n | n. n>0}"
(* A is nonnegative, as an infimum of nonnegative quotients of distances. *)
lemma Apos: "A ≥ 0"
unfolding A_def u_def by (rule cInf_greatest, auto)
(* u n / n converges to A.  Fekete's lemma applies: u is bounded below by 0, and
   subadditive via the triangle inequality plus the semicontraction of T^^m. *)
lemma Alim:"(λn. u n/n) ⇢ A"
unfolding A_def proof (rule fekete)
show "bdd_below {u n / real n |n. 0 < n}"
unfolding u_def bdd_below_def by (rule exI[of _ 0], auto)
fix m n
(* Subadditivity: go from 0 to T^m(0), then from T^m(0) to T^{m+n}(0) = T^m(T^n(0)),
   and contract the second leg back to dist 0 (T^n(0)). *)
have "u (m+n) = dist 0 ((T^^(m+n)) 0)"
unfolding u_def by simp
also have "... ≤ dist 0 ((T^^m) 0) + dist ((T^^m) 0) ((T^^(m+n)) 0)"
by (rule dist_triangle)
also have "... = dist 0 ((T^^m) 0) + dist ((T^^m) 0) ((T^^m) ((T^^n) 0))"
by (auto simp add: funpow_add)
also have "... ≤ dist 0 ((T^^m) 0) + dist 0 ((T^^n) 0)"
using semicontract_Tn[of m] add_mono_thms_linordered_semiring(2) by blast
also have "... = u m + u n"
unfolding u_def by auto
finally show "u (m+n) ≤ u m + u n" by auto
qed
text ‹The main fact to prove the existence of an asymptotic translation vector for $T$
is the following proposition: there exists a unit norm vector $v$ such that $T^\ell(0)$ is in
the half-space at distance $A \ell$ from the origin directed by $v$.
The idea of the proof is to find such a vector $v_i$ that works (with a small error $\epsilon_i > 0$)
for times up to a time $n_i$, and then take a limit by compactness (or weak compactness, but
since we are in finite dimension, compactness works fine). Times $n_i$ are chosen to be large
high scores of the sequence $u_n - (A-\epsilon_i) n$, which tends to infinity since $u_n/n$
tends to $A$.›
(* Key proposition: there is a unit vector v with v ∙ T^l(0) ≤ -A l for ALL l.
   Strategy: for each i, build a unit vector vi valid up to time i with error eps i
   (using a high score n of l ↦ u l - (A - eps i) l, which tends to ∞ since
   u l / l ⇢ A), then extract a convergent subsequence of the vi by compactness of
   the unit sphere and pass to the limit. *)
proposition half_space:
"∃v. norm v = 1 ∧ (∀l. v ∙ (T ^^ l) 0 ≤ - A * l)"
proof -
(* A fixed positive sequence of error tolerances tending to 0. *)
define eps::"nat ⇒ real" where "eps = (λi. 1/of_nat (i+1))"
have "eps i > 0" for i unfolding eps_def by auto
have "eps ⇢ 0"
unfolding eps_def using LIMSEQ_ignore_initial_segment[OF lim_1_over_n, of 1] by simp
(* Step 1: for each i, an approximate half-space vector valid for l ≤ i. *)
have vi: "∃vi. norm vi = 1 ∧ (∀l ≤ i. vi ∙ (T ^^ l) 0 ≤ (- A + eps i) * l)" for i
proof -
(* u n - (A - eps i) n ⇢ ∞ because u n / n ⇢ A and eps i > 0. *)
have L: "(λn. ereal(u n - (A - eps i) * n)) ⇢ ∞"
proof (rule Lim_transform_eventually)
have "ereal ((u n/n - A) + eps i) * ereal n = ereal(u n - (A - eps i) * n)" if "n ≥ 1" for n
using that by (auto simp add: divide_simps algebra_simps)
then show "eventually (λn. ereal ((u n/n - A) + eps i) * ereal n = ereal(u n - (A - eps i) * n)) sequentially"
unfolding eventually_sequentially by auto
have "(λn. (ereal ((u n/n - A) + eps i)) * ereal n) ⇢ (0 + eps i) * ∞"
apply (intro tendsto_intros)
using ‹eps i > 0› Alim by (auto simp add: LIM_zero)
then show "(λn. ereal (u n / real n - A + eps i) * ereal (real n)) ⇢ ∞"
using ‹eps i > 0› by simp
qed
(* Pick a high-score time n ≥ i of that sequence, and a unit vector vi pointing
   opposite to T^n(0) (select_unit_norm). *)
obtain n where n: "n ≥ i" "⋀l. l ≤ n ⟹ u l - (A - eps i) * l ≤ u n - (A - eps i) * n"
using high_scores[OF L, of i] by auto
obtain vi where vi: "norm vi = 1" "vi ∙ ((T^^n) 0) = - norm ((T^^n) 0)"
using select_unit_norm by auto
(* vi works for l ≤ i: Cauchy–Schwarz, semicontraction, and the high-score
   property combine into the desired linear bound. *)
have "vi ∙ (T ^^ l) 0 ≤ (- A + eps i) * l" if "l ≤ i" for l
proof -
have *: "n = l + (n-l)" using that ‹n ≥ i› by auto
have **: "real (n-l) = real n - real l" using that ‹n ≥ i› by auto
have "vi ∙ (T ^^ l) 0 = vi ∙ ((T ^^ l) 0 - (T^^n) 0) + vi ∙ ((T^^n) 0)"
by (simp add: inner_diff_right)
also have "... ≤ norm vi * norm (((T ^^ l) 0 - (T^^n) 0)) + vi ∙ ((T^^n) 0)"
by (simp add: norm_cauchy_schwarz)
also have "... = dist ((T^^l)(0)) ((T^^n) 0) - norm ((T^^n) 0)"
using vi by (auto simp add: dist_norm)
also have "... = dist ((T^^l)(0)) ((T^^l) ((T^^(n-l)) 0)) - norm ((T^^n) 0)"
by (metis * funpow_add o_apply)
also have "... ≤ dist 0 ((T^^(n-l)) 0) - norm ((T^^n) 0)"
using semicontract_Tn[of l 0 "(T^^(n-l)) 0"] by auto
also have "... = u (n-l) - u n"
unfolding u_def by auto
also have "... ≤ - (A - eps i) * l"
using n(2)[of "n-l"] unfolding ** by (auto simp add: algebra_simps)
finally show ?thesis by auto
qed
then show ?thesis using vi(1) by auto
qed
(* Step 2: choose one such vector V i per i, then extract a limit v on the unit
   sphere by sequential compactness. *)
have "∃V::(nat ⇒ 'a). ∀i. norm (V i) = 1 ∧ (∀l≤i. V i ∙ (T ^^ l) 0 ≤ (- A + eps i) * l)"
apply (rule choice) using vi by auto
then obtain V::"nat ⇒ 'a" where V: "⋀i. norm (V i) = 1" "⋀l i. l ≤ i ⟹ V i ∙ (T ^^ l) 0 ≤ (- A + eps i) * l"
by auto
have "compact (sphere (0::'a) 1)" by simp
moreover have "V i ∈ sphere 0 1" for i using V(1) by auto
ultimately have "∃v ∈ sphere 0 1. ∃r. strict_mono r ∧ (V o r) ⇢ v"
using compact_eq_seq_compact_metric seq_compact_def by metis
then obtain v r where v: "v ∈ sphere 0 1" "strict_mono r" "(V o r) ⇢ v"
by auto
(* Step 3: the approximate bounds pass to the limit as eps (r i) ⇢ 0. *)
have "v ∙ (T ^^ l) 0 ≤ - A * l" for l
proof -
have *: "(λi. (-A + eps (r i)) * l - V (r i) ∙ (T ^^ l) 0) ⇢ (-A + 0) * l - v ∙ (T ^^ l) 0"
apply (intro tendsto_intros)
using ‹(V o r) ⇢ v› ‹eps ⇢ 0› ‹strict_mono r› LIMSEQ_subseq_LIMSEQ unfolding comp_def by auto
have "eventually (λi. (-A + eps (r i)) * l - V (r i) ∙ (T ^^ l) 0 ≥ 0) sequentially"
unfolding eventually_sequentially apply (rule exI[of _ l])
using V(2)[of l] seq_suble[OF ‹strict_mono r›] apply auto using le_trans by blast
then have " (-A + 0) * l - v ∙ (T ^^ l) 0 ≥ 0"
using LIMSEQ_le_const[OF *, of 0] unfolding eventually_sequentially by auto
then show ?thesis by auto
qed
then show ?thesis using ‹v ∈ sphere 0 1› by auto
qed
text ‹We can now show the existence of an asymptotic translation vector for $T$. It is the vector
$-v$ of the previous proposition: the point $T^\ell(0)$ is in the half-space
at distance $A \ell$ from the origin directed by $v$, and has norm $\sim A \ell$, hence
$T^\ell(0)/\ell$ has to be essentially $-A v$ by strict convexity of the euclidean norm.›
(* Kohlberg–Neyman theorem: T^n(0)/n converges (to -A v, with v from half_space).
   The proof expands ‖T^n(0)/n + A v‖² and bounds it by ‖T^n(0)‖²/n² - A², which
   tends to A² - A² = 0; this is where strict convexity of the norm enters. *)
theorem KNK_thm:
"convergent (λn. ((T^^n) 0) /⇩R n)"
proof -
obtain v where v: "norm v = 1" "⋀l. v ∙ (T ^^ l) 0 ≤ - A * l"
using half_space by auto
have "(λn. norm(((T^^n) 0) /⇩R n + A *⇩R v)^2) ⇢ 0"
proof (rule tendsto_sandwich[of "λ_. 0" _ _ "λn. (norm((T^^n) 0) /⇩R n)^2 - A^2"])
(* Pointwise upper bound for n ≥ 1: expand the square and use v ∙ T^n(0) ≤ -A n. *)
have "norm(((T^^n) 0) /⇩R n + A *⇩R v)^2 ≤ (norm((T^^n) 0) /⇩R n)^2 - A^2" if "n ≥ 1" for n
proof -
have "norm(((T^^n) 0) /⇩R n + A *⇩R v)^2 = norm(((T^^n) 0) /⇩R n)^2 + A * A * (norm v)^2 + 2 * A * inverse n * (v ∙ (T^^n) 0)"
unfolding power2_norm_eq_inner by (auto simp add: inner_commute algebra_simps)
also have "... ≤ norm(((T^^n) 0) /⇩R n)^2 + A * A * (norm v)^2 + 2 * A * inverse n * (-A * n)"
using mult_left_mono[OF v(2)[of n] Apos] ‹n ≥ 1› by (auto, auto simp add: divide_simps)
also have "... = norm(((T^^n) 0) /⇩R n)^2 - A * A"
using ‹n ≥ 1› v(1) by auto
finally show ?thesis by (simp add: power2_eq_square)
qed
then show "eventually (λn. norm ((T ^^ n) 0 /⇩R real n + A *⇩R v)^2 ≤ (norm ((T ^^ n) 0) /⇩R real n)⇧2 - A^2) sequentially"
unfolding eventually_sequentially by auto
(* The upper bound tends to 0, since norm(T^n(0))/n = u n / n ⇢ A. *)
have "(λn. (norm ((T ^^ n) 0) /⇩R real n)^2) ⇢ A⇧2"
apply (intro tendsto_intros)
using Alim unfolding u_def by (auto simp add: divide_simps)
then show "(λn. (norm ((T ^^ n) 0) /⇩R real n)⇧2 - A⇧2) ⇢ 0"
by (simp add: LIM_zero)
qed (auto)
(* Translate squared-norm convergence into convergence of T^n(0)/n to -A v. *)
then have "(λn. sqrt((norm(((T^^n) 0) /⇩R n + A *⇩R v))^2)) ⇢ sqrt 0"
by (intro tendsto_intros)
then have "(λn. norm((((T^^n) 0) /⇩R n) - (- A *⇩R v))) ⇢ 0"
by auto
then have "(λn. ((T^^n) 0) /⇩R n) ⇢ - A *⇩R v"
using Lim_null tendsto_norm_zero_iff by blast
then show "convergent (λn. ((T^^n) 0) /⇩R n)"
unfolding convergent_def by auto
qed
end
end
Theory Transfer_Operator
section ‹Transfer Operator›
theory Transfer_Operator
imports Recurrence
begin
context qmpt begin
text ‹The map $T$ acts on measures by push-forward. In particular, if $f d\mu$ is absolutely continuous
with respect to the reference measure $\mu$, then its push-forward $T_*(f d\mu)$ is absolutely
continuous with respect to $\mu$, and can therefore be written as $g d\mu$ for some function $g$.
The map $f \mapsto g$, representing the action of $T$ on the level of densities, is called the
transfer operator associated to $T$ and often denoted by $\hat T$.
We first define it on nonnegative functions, using Radon-Nikodym derivatives. Then, we extend it
to general real-valued functions by separating it into positive and negative parts.
The theory presents many similarities with the theory of conditional expectations. Indeed, it is
possible to make a theory encompassing the two. When the map is measure preserving,
there is also a direct relationship: $(\hat T f) \circ T$ is the conditional expectation of $f$
with respect to $T^{-1}B$ where $B$ is the sigma-algebra. Instead of building a general theory,
we copy the proofs for conditional expectations and adapt them where needed.›
subsection ‹The transfer operator on nonnegative functions›
(* Transfer operator on nonnegative (ennreal-valued) functions: the Radon–Nikodym
   derivative of the push-forward of the density f dM under T.  Non-measurable
   arguments are mapped to the zero function so the operator is total. *)
definition nn_transfer_operator :: "('a ⇒ ennreal) ⇒ ('a ⇒ ennreal)"
where
"nn_transfer_operator f = (if f ∈ borel_measurable M then RN_deriv M (distr (density M f) M T)
else (λ_. 0))"
(* Measurability of the transfer operator (both branches of the definition are
   measurable), registered with the [measurable] machinery. *)
lemma borel_measurable_nn_transfer_operator [measurable]:
"nn_transfer_operator f ∈ borel_measurable M"
unfolding nn_transfer_operator_def by auto
(* Measurability of all iterates, by case distinction on n (n = 0 is the identity). *)
lemma borel_measurable_nn_transfer_operator_iterates [measurable]:
assumes [measurable]: "f ∈ borel_measurable M"
shows "(nn_transfer_operator^^n) f ∈ borel_measurable M"
by (cases n, auto)
text ‹The next lemma is arguably the most fundamental property of the transfer operator: it is the
adjoint of the composition by $T$. If one defined it as an abstract adjoint, it would be defined
on the dual of $L^\infty$, which is a large unwieldy space. The point is that it can be defined
on genuine functions, using the push-forward point of view above. However, once we have this
property, we can forget completely about the definition, since this property characterizes
the transfer operator, as the second lemma below shows.
From this point on, we will only work with it, and forget completely about
the definition using Radon-Nikodym derivatives.
›
(* Fundamental duality: ∫ f · (T̂ g) dM = ∫ (f ∘ T) · g dM for nonnegative
   measurable f, g.  Everything below rests on this identity alone. *)
lemma nn_transfer_operator_intg:
assumes [measurable]: "f ∈ borel_measurable M" "g ∈ borel_measurable M"
shows "(∫⇧+ x. f x * nn_transfer_operator g x ∂M) = (∫⇧+ x. f (T x) * g x ∂M)"
proof -
(* The RN derivative reconstructs the push-forward measure; this uses that T is
   quasi-measure-preserving (Tqm), so the push-forward is absolutely continuous. *)
have *: "density M (RN_deriv M (distr (density M g) M T)) = distr (density M g) M T"
by (rule density_RN_deriv) (auto intro!: quasi_measure_preserving_absolutely_continuous simp add: Tqm)
have "(∫⇧+ x. f x * nn_transfer_operator g x ∂M) = (∫⇧+ x. f x ∂(density M (RN_deriv M (distr (density M g) M T))))"
unfolding nn_transfer_operator_def by (simp add: nn_integral_densityR)
also have "... = (∫⇧+ x. f x ∂(distr (density M g) M T))"
unfolding * by simp
also have "... = (∫⇧+ x. f (T x) ∂(density M g))"
by (rule nn_integral_distr, auto)
also have "... = (∫⇧+ x. f (T x) * g x ∂M)"
by (simp add: nn_integral_densityR)
finally show ?thesis by auto
qed
(* Duality for iterates: ∫ f · (T̂ⁿ g) dM = ∫ (f ∘ Tⁿ) · g dM, by induction on n,
   peeling one application of the single-step duality at each stage. *)
lemma nn_transfer_operator_intTn_g:
assumes "f ∈ borel_measurable M" "g ∈ borel_measurable M"
shows "(∫⇧+ x. f x * (nn_transfer_operator^^n) g x ∂M) = (∫⇧+ x. f ((T^^n) x) * g x ∂M)"
proof -
(* The induction is carried out with f and g universally quantified, since the
   Suc step applies the IH to the function x ↦ f (T x). *)
have "⋀f g. f ∈ borel_measurable M ⟹ g ∈ borel_measurable M ⟹ (∫⇧+ x. f x * (nn_transfer_operator^^n) g x ∂M) = (∫⇧+ x. f ((T^^n) x) * g x ∂M)" for n
proof (induction n)
case (Suc n)
have [measurable]: "f ∈ borel_measurable M" "g ∈ borel_measurable M" by fact+
have "(∫⇧+ x. f x * (nn_transfer_operator ^^ Suc n) g x ∂M) = (∫⇧+ x. f x * (nn_transfer_operator ((nn_transfer_operator^^n) g)) x ∂M)"
apply (rule nn_integral_cong) using funpow.simps(2) unfolding comp_def by auto
also have "... = (∫⇧+ x. f (T x) * (nn_transfer_operator^^n) g x ∂M)"
by (rule nn_transfer_operator_intg, auto)
also have "... = (∫⇧+ x. (λx. f (T x)) ((T^^n) x) * g x ∂M)"
by (rule Suc.IH, auto)
also have "... = (∫⇧+ x. f ((T^^(Suc n)) x) * g x ∂M)"
apply (rule nn_integral_cong) using funpow.simps(2) unfolding comp_def by auto
finally show ?case by auto
qed (simp)
then show ?thesis using assms by auto
qed
(* Same identity with the factors commuted, for convenient rewriting. *)
lemma nn_transfer_operator_intg_Tn:
assumes "f ∈ borel_measurable M" "g ∈ borel_measurable M"
shows "(∫⇧+ x. (nn_transfer_operator^^n) g x * f x ∂M) = (∫⇧+ x. g x * f ((T^^n) x) ∂M)"
using nn_transfer_operator_intTn_g[OF assms, of n] by (simp add: algebra_simps)
(* The duality relation tested against indicators characterizes T̂ f up to a.e.
   equality: if g integrates like T̂ f against every indicator, then g = T̂ f a.e.
   Uniqueness comes from density_unique2 in the sigma-finite setting. *)
lemma nn_transfer_operator_charact:
assumes "⋀A. A ∈ sets M ⟹ (∫⇧+ x. indicator A x * g x ∂M) = (∫⇧+ x. indicator A (T x) * f x ∂M)" and
[measurable]: "f ∈ borel_measurable M" "g ∈ borel_measurable M"
shows "AE x in M. nn_transfer_operator f x = g x"
proof -
(* g and T̂ f have the same set integrals over all measurable A. *)
have *:"set_nn_integral M A g = set_nn_integral M A (nn_transfer_operator f)" if [measurable]: "A ∈ sets M" for A
proof -
have "set_nn_integral M A g = (∫⇧+ x. indicator A x * g x ∂M)"
using mult.commute by metis
also have "... = (∫⇧+ x. indicator A (T x) * f x ∂M)"
using assms(1) by auto
also have "... = (∫⇧+ x. indicator A x * nn_transfer_operator f x ∂M)"
by (rule nn_transfer_operator_intg[symmetric], auto)
finally show ?thesis
using mult.commute by (metis (no_types, lifting) nn_integral_cong)
qed
show ?thesis
by (rule sigma_finite_measure.density_unique2, auto simp add: sigma_finite_measure_axioms *)
qed
text ‹When $T$ is measure-preserving, $\hat T(f \circ T) = f$.›
(* In the measure-preserving case, T̂ (f ∘ T) = f a.e.: the characteristic
   property reduces to the invariance of nonnegative integrals under T. *)
lemma (in mpt) nn_transfer_operator_foT:
assumes [measurable]: "f ∈ borel_measurable M"
shows "AE x in M. nn_transfer_operator (f o T) x = f x"
proof -
have *: "(∫⇧+ x. indicator A x * f x ∂M) = (∫⇧+ x. indicator A (T x) * f (T x) ∂M)" if [measurable]: "A ∈ sets M" for A
by (subst T_nn_integral_preserving[symmetric]) auto
show ?thesis
by (rule nn_transfer_operator_charact) (auto simp add: assms *)
qed
text ‹In general, one only has $\hat T(f\circ T \cdot g) = f \cdot \hat T g$.›
(* General "pull-out" rule: T̂ ((f ∘ T) · g) = f · T̂ g a.e., again via the
   characteristic property and associativity of multiplication. *)
lemma nn_transfer_operator_foT_g:
assumes [measurable]: "f ∈ borel_measurable M" "g ∈ borel_measurable M"
shows "AE x in M. nn_transfer_operator (λx. f (T x) * g x) x = f x * nn_transfer_operator g x"
proof -
have *: "(∫⇧+ x. indicator A x * (f x * nn_transfer_operator g x) ∂M) = (∫⇧+ x. indicator A (T x) * (f (T x) * g x) ∂M)"
if [measurable]: "A ∈ sets M" for A
by (simp add: mult.assoc[symmetric] nn_transfer_operator_intg)
show ?thesis
by (rule nn_transfer_operator_charact) (auto simp add: assms *)
qed
(* Homogeneity: constants pull out of T̂ (special case of the previous lemma
   with f the constant function c). *)
lemma nn_transfer_operator_cmult:
assumes [measurable]: "g ∈ borel_measurable M"
shows "AE x in M. nn_transfer_operator (λx. c * g x) x = c * nn_transfer_operator g x"
apply (rule nn_transfer_operator_foT_g) using assms by auto
(* T̂ 0 = 0 a.e., as the c = 0 instance of homogeneity. *)
lemma nn_transfer_operator_zero:
"AE x in M. nn_transfer_operator (λx. 0) x = 0"
using nn_transfer_operator_cmult[of "λx. 0" 0] by auto
(* Additivity: T̂ (f + g) = T̂ f + T̂ g a.e., checked against indicators through
   linearity of the nonnegative integral. *)
lemma nn_transfer_operator_sum:
assumes [measurable]: "f ∈ borel_measurable M" "g ∈ borel_measurable M"
shows "AE x in M. nn_transfer_operator (λx. f x + g x) x = nn_transfer_operator f x + nn_transfer_operator g x"
proof (rule nn_transfer_operator_charact)
fix A assume [measurable]: "A ∈ sets M"
have "(∫⇧+ x. indicator A x * (nn_transfer_operator f x + nn_transfer_operator g x) ∂M) =
(∫⇧+ x. indicator A x * nn_transfer_operator f x + indicator A x * nn_transfer_operator g x ∂M)"
by (auto simp add: algebra_simps)
also have "... = (∫⇧+x. indicator A x * nn_transfer_operator f x ∂M) + (∫⇧+x. indicator A x * nn_transfer_operator g x ∂M)"
by (rule nn_integral_add, auto)
also have "... = (∫⇧+x. indicator A (T x) * f x ∂M) + (∫⇧+x. indicator A (T x) * g x ∂M)"
by (simp add: nn_transfer_operator_intg)
also have "... = (∫⇧+x. indicator A (T x) * f x + indicator A (T x) * g x ∂M)"
by (rule nn_integral_add[symmetric], auto)
also have "... = (∫⇧+x. indicator A (T x) * (f x + g x) ∂M)"
by (auto simp add: algebra_simps)
finally show "(∫⇧+ x. indicator A x * (nn_transfer_operator f x + nn_transfer_operator g x) ∂M) = (∫⇧+x. indicator A (T x) * (f x + g x) ∂M)"
by simp
qed (auto simp add: assms)
(* Congruence: a.e.-equal arguments have a.e.-equal images under T̂. *)
lemma nn_transfer_operator_cong:
assumes "AE x in M. f x = g x"
and [measurable]: "f ∈ borel_measurable M" "g ∈ borel_measurable M"
shows "AE x in M. nn_transfer_operator f x = nn_transfer_operator g x"
apply (rule nn_transfer_operator_charact)
apply (auto simp add: nn_transfer_operator_intg assms intro!: nn_integral_cong_AE)
using assms by auto
(* Monotonicity: f ≤ g a.e. implies T̂ f ≤ T̂ g a.e.  Write g = f + h with
   h = g - f ≥ 0 (subtraction is well behaved in ennreal) and use additivity. *)
lemma nn_transfer_operator_mono:
assumes "AE x in M. f x ≤ g x"
and [measurable]: "f ∈ borel_measurable M" "g ∈ borel_measurable M"
shows "AE x in M. nn_transfer_operator f x ≤ nn_transfer_operator g x"
proof -
define h where "h = (λx. g x - f x)"
have [measurable]: "h ∈ borel_measurable M" unfolding h_def by simp
have *: "AE x in M. g x = f x + h x" unfolding h_def using assms(1) by (auto simp: ennreal_ineq_diff_add)
have "AE x in M. nn_transfer_operator g x = nn_transfer_operator (λx. f x + h x) x"
by (rule nn_transfer_operator_cong) (auto simp add: * assms)
moreover have "AE x in M. nn_transfer_operator (λx. f x + h x) x = nn_transfer_operator f x + nn_transfer_operator h x"
by (rule nn_transfer_operator_sum) (auto simp add: assms)
ultimately have "AE x in M. nn_transfer_operator g x = nn_transfer_operator f x + nn_transfer_operator h x" by auto
then show ?thesis by force
qed
subsection ‹The transfer operator on real functions›
text ‹Once the transfer operator of positive functions is defined, the definition for real-valued
functions follows readily, by taking the difference of positive and negative parts.
›
(* Transfer operator on real functions: difference of the nonnegative transfer
   operators of the positive and negative parts, converted back via enn2real. *)
definition real_transfer_operator :: "('a ⇒ real) ⇒ ('a ⇒ real)" where
"real_transfer_operator f =
(λx. enn2real(nn_transfer_operator (λx. ennreal (f x)) x) - enn2real(nn_transfer_operator (λx. ennreal (-f x)) x))"
(* Measurability, inherited from the nonnegative case. *)
lemma borel_measurable_transfer_operator [measurable]:
"real_transfer_operator f ∈ borel_measurable M"
unfolding real_transfer_operator_def by auto
(* Measurability of all iterates, by case distinction on n. *)
lemma borel_measurable_transfer_operator_iterates [measurable]:
assumes [measurable]: "f ∈ borel_measurable M"
shows "(real_transfer_operator^^n) f ∈ borel_measurable M"
by (cases n, auto)
(* |T̂ f| ≤ T̂ |f| a.e.: the triangle inequality on the positive/negative-part
   decomposition, combined with additivity of the nonnegative operator. *)
lemma real_transfer_operator_abs:
assumes [measurable]: "f ∈ borel_measurable M"
shows "AE x in M. abs (real_transfer_operator f x) ≤ nn_transfer_operator (λx. ennreal (abs(f x))) x"
proof -
(* fp/fm are the ennreal-valued positive and negative parts; |f| = fp + fm. *)
define fp where "fp = (λx. ennreal (f x))"
define fm where "fm = (λx. ennreal (- f x))"
have [measurable]: "fp ∈ borel_measurable M" "fm ∈ borel_measurable M" unfolding fp_def fm_def by auto
have eq: "⋀x. ennreal ¦f x¦ = fp x + fm x" unfolding fp_def fm_def by (simp add: abs_real_def ennreal_neg)
{
(* Pointwise argument, valid wherever additivity of T̂ holds. *)
fix x assume H: "nn_transfer_operator (λx. fp x + fm x) x = nn_transfer_operator fp x + nn_transfer_operator fm x"
have "¦real_transfer_operator f x¦ ≤ ¦enn2real(nn_transfer_operator fp x)¦ + ¦enn2real(nn_transfer_operator fm x)¦"
unfolding real_transfer_operator_def fp_def fm_def by (auto intro: abs_triangle_ineq4 simp del: enn2real_nonneg)
from ennreal_leI[OF this]
have "abs(real_transfer_operator f x) ≤ nn_transfer_operator fp x + nn_transfer_operator fm x"
by simp (metis add.commute ennreal_enn2real le_iff_add not_le top_unique)
also have "... = nn_transfer_operator (λx. fp x + fm x) x" using H by simp
finally have "abs(real_transfer_operator f x) ≤ nn_transfer_operator (λx. fp x + fm x) x" by simp
}
moreover have "AE x in M. nn_transfer_operator (λx. fp x + fm x) x = nn_transfer_operator fp x + nn_transfer_operator fm x"
by (rule nn_transfer_operator_sum) (auto simp add: fp_def fm_def)
ultimately have "AE x in M. abs(real_transfer_operator f x) ≤ nn_transfer_operator (λx. fp x + fm x) x"
by auto
then show ?thesis using eq by simp
qed
text ‹The next lemma shows that the transfer operator as we have defined it satisfies the basic
duality relation $\int \hat T f \cdot g = \int f \cdot g \circ T$. It follows from the same relation
for nonnegative functions, and splitting into positive and negative parts.
Moreover, this relation characterizes the transfer operator. Hence, once this lemma is proved, we
will never come back to the original definition of the transfer operator.›
(* Duality for the real transfer operator, in the special case where f ≥ 0:
   if (f ∘ T) · g is integrable then f · (T̂ g) is integrable with the same
   integral.  The proof splits g into positive/negative parts gp/gm, controls
   everything by h = |g|, and transfers each part via the nonnegative duality. *)
lemma real_transfer_operator_intg_fpos:
assumes "integrable M (λx. f (T x) * g x)" and f_pos[simp]: "⋀x. f x ≥ 0" and
[measurable]: "f ∈ borel_measurable M" "g ∈ borel_measurable M"
shows "integrable M (λx. f x * real_transfer_operator g x)"
"(∫ x. f x * real_transfer_operator g x ∂M) = (∫ x. f (T x) * g x ∂M)"
proof -
(* Positive/negative parts of g as ennreal functions, and their sum h = |g|. *)
define gp where "gp = (λx. ennreal (g x))"
define gm where "gm = (λx. ennreal (- g x))"
have [measurable]: "gp ∈ borel_measurable M" "gm ∈ borel_measurable M" unfolding gp_def gm_def by auto
define h where "h = (λx. ennreal(abs(g x)))"
have hgpgm: "⋀x. h x = gp x + gm x" unfolding gp_def gm_def h_def by (simp add: abs_real_def ennreal_neg)
have [measurable]: "h ∈ borel_measurable M" unfolding h_def by simp
have pos[simp]: "⋀x. h x ≥ 0" "⋀x. gp x ≥ 0" "⋀x. gm x ≥ 0" unfolding h_def gp_def gm_def by simp_all
have gp_real: "⋀x. enn2real(gp x) = max (g x) 0"
unfolding gp_def by (simp add: max_def ennreal_neg)
have gm_real: "⋀x. enn2real(gm x) = max (-g x) 0"
unfolding gm_def by (simp add: max_def ennreal_neg)
(* Integrability of (f ∘ T) times each truncated part, dominated by (f ∘ T) · g. *)
have "(∫⇧+ x. norm(f (T x) * max (g x) 0) ∂M) ≤ (∫⇧+ x. norm(f (T x) * g x) ∂M)"
by (simp add: nn_integral_mono)
also have "... < ∞" using assms(1) by (simp add: integrable_iff_bounded)
finally have "(∫⇧+ x. norm(f (T x) * max (g x) 0) ∂M) < ∞" by simp
then have int1: "integrable M (λx. f (T x) * max (g x) 0)" by (simp add: integrableI_bounded)
have "(∫⇧+ x. norm(f (T x) * max (-g x) 0) ∂M) ≤ (∫⇧+ x. norm(f (T x) * g x) ∂M)"
by (simp add: nn_integral_mono)
also have "... < ∞" using assms(1) by (simp add: integrable_iff_bounded)
finally have "(∫⇧+ x. norm(f (T x) * max (-g x) 0) ∂M) < ∞" by simp
then have int2: "integrable M (λx. f (T x) * max (-g x) 0)" by (simp add: integrableI_bounded)
(* Master finiteness bound *: ∫ f · (T̂ h) < ∞, obtained from the nonnegative
   duality and the two integrability facts above. *)
have "(∫⇧+x. f x * nn_transfer_operator h x ∂M) = (∫⇧+x. f (T x) * h x ∂M)"
by (rule nn_transfer_operator_intg) auto
also have "… = ∫⇧+ x. ennreal (f (T x) * max (g x) 0 + f (T x) * max (- g x) 0) ∂M"
unfolding h_def
by (intro nn_integral_cong)(auto simp: ennreal_mult[symmetric] abs_mult split: split_max)
also have "... < ∞"
using Bochner_Integration.integrable_add[OF int1 int2, THEN integrableD(2)] by (auto simp add: less_top)
finally have *: "(∫⇧+x. f x * nn_transfer_operator h x ∂M) < ∞" by simp
(* First goal: integrability of f · (T̂ g), dominated via |T̂ g| ≤ T̂ |g|. *)
have "(∫⇧+x. norm(f x * real_transfer_operator g x) ∂M) = (∫⇧+x. f x * abs(real_transfer_operator g x) ∂M)"
by (simp add: abs_mult)
also have "... ≤ (∫⇧+x. f x * nn_transfer_operator h x ∂M)"
proof (rule nn_integral_mono_AE)
{
fix x assume *: "abs(real_transfer_operator g x) ≤ nn_transfer_operator h x"
have "ennreal (f x * ¦real_transfer_operator g x¦) = f x * ennreal(¦real_transfer_operator g x¦)"
by (simp add: ennreal_mult)
also have "... ≤ f x * nn_transfer_operator h x"
using * by (auto intro!: mult_left_mono)
finally have "ennreal (f x * ¦real_transfer_operator g x¦) ≤ f x * nn_transfer_operator h x"
by simp
}
then show "AE x in M. ennreal (f x * ¦real_transfer_operator g x¦) ≤ f x * nn_transfer_operator h x"
using real_transfer_operator_abs[OF assms(4)] h_def by auto
qed
finally have **: "(∫⇧+x. norm(f x * real_transfer_operator g x) ∂M) < ∞" using * by auto
show "integrable M (λx. f x * real_transfer_operator g x)"
using ** by (intro integrableI_bounded) auto
(* Positive part: f · (T̂ gp) is finite (dominated by *), integrable, and its
   Bochner integral transfers to ∫ (f ∘ T) · enn2real(gp). *)
have "(∫⇧+x. f x * nn_transfer_operator gp x ∂M) ≤ (∫⇧+x. f x * nn_transfer_operator h x ∂M)"
proof (rule nn_integral_mono_AE)
have "AE x in M. nn_transfer_operator gp x ≤ nn_transfer_operator h x"
by (rule nn_transfer_operator_mono) (auto simp add: hgpgm)
then show "AE x in M. f x * nn_transfer_operator gp x ≤ f x * nn_transfer_operator h x"
by (auto simp: mult_left_mono)
qed
then have a: "(∫⇧+x. f x * nn_transfer_operator gp x ∂M) < ∞"
using * by auto
have "ennreal(norm(f x * enn2real(nn_transfer_operator gp x))) ≤ f x * nn_transfer_operator gp x" for x
by (auto simp add: ennreal_mult intro!: mult_left_mono)
(metis enn2real_ennreal enn2real_nonneg le_cases le_ennreal_iff)
then have "(∫⇧+x. norm(f x * enn2real(nn_transfer_operator gp x)) ∂M) ≤ (∫⇧+x. f x * nn_transfer_operator gp x ∂M)"
by (simp add: nn_integral_mono)
then have "(∫⇧+x. norm(f x * enn2real(nn_transfer_operator gp x)) ∂M) < ∞" using a by auto
then have gp_int: "integrable M (λx. f x * enn2real(nn_transfer_operator gp x))" by (simp add: integrableI_bounded)
have gp_fin: "AE x in M. f x * nn_transfer_operator gp x ≠ ∞"
apply (rule nn_integral_PInf_AE) using a by auto
have "(∫ x. f x * enn2real(nn_transfer_operator gp x) ∂M) = enn2real (∫⇧+ x. f x * enn2real(nn_transfer_operator gp x) ∂M)"
by (rule integral_eq_nn_integral) auto
also have "... = enn2real(∫⇧+ x. ennreal(f (T x) * enn2real(gp x)) ∂M)"
proof -
{
fix x assume "f x * nn_transfer_operator gp x ≠ ∞"
then have "ennreal (f x * enn2real (nn_transfer_operator gp x)) = ennreal (f x) * nn_transfer_operator gp x"
by (auto simp add: ennreal_mult ennreal_mult_eq_top_iff less_top intro!: ennreal_mult_left_cong)
}
then have "AE x in M. ennreal (f x * enn2real (nn_transfer_operator gp x)) = ennreal (f x) * nn_transfer_operator gp x"
using gp_fin by auto
then have "(∫⇧+ x. f x * enn2real(nn_transfer_operator gp x) ∂M) = (∫⇧+ x. f x * nn_transfer_operator gp x ∂M)"
by (rule nn_integral_cong_AE)
also have "... = (∫⇧+ x. f (T x) * gp x ∂M)"
by (rule nn_transfer_operator_intg) (auto simp add: gp_def)
also have "... = (∫⇧+ x. ennreal(f (T x) * enn2real(gp x)) ∂M)"
by (rule nn_integral_cong_AE) (auto simp: ennreal_mult gp_def)
finally have "(∫⇧+ x. f x * enn2real(nn_transfer_operator gp x) ∂M) = (∫⇧+ x. ennreal(f (T x) * enn2real(gp x)) ∂M)" by simp
then show ?thesis by simp
qed
also have "... = (∫ x. f (T x) * enn2real(gp x) ∂M)"
by (rule integral_eq_nn_integral[symmetric]) (auto simp add: gp_def)
finally have gp_expr: "(∫ x. f x * enn2real(nn_transfer_operator gp x) ∂M) = (∫ x. f (T x) * enn2real(gp x) ∂M)" by simp
(* Negative part: same chain of arguments, verbatim, with gm in place of gp. *)
have "(∫⇧+x. f x * nn_transfer_operator gm x ∂M) ≤ (∫⇧+x. f x * nn_transfer_operator h x ∂M)"
proof (rule nn_integral_mono_AE)
have "AE x in M. nn_transfer_operator gm x ≤ nn_transfer_operator h x"
by (rule nn_transfer_operator_mono) (auto simp add: hgpgm)
then show "AE x in M. f x * nn_transfer_operator gm x ≤ f x * nn_transfer_operator h x"
by (auto simp: mult_left_mono)
qed
then have a: "(∫⇧+x. f x * nn_transfer_operator gm x ∂M) < ∞"
using * by auto
have "⋀x. ennreal(norm(f x * enn2real(nn_transfer_operator gm x))) ≤ f x * nn_transfer_operator gm x"
by (auto simp add: ennreal_mult intro!: mult_left_mono)
(metis enn2real_ennreal enn2real_nonneg le_cases le_ennreal_iff)
then have "(∫⇧+x. norm(f x * enn2real(nn_transfer_operator gm x)) ∂M) ≤ (∫⇧+x. f x * nn_transfer_operator gm x ∂M)"
by (simp add: nn_integral_mono)
then have "(∫⇧+x. norm(f x * enn2real(nn_transfer_operator gm x)) ∂M) < ∞" using a by auto
then have gm_int: "integrable M (λx. f x * enn2real(nn_transfer_operator gm x))" by (simp add: integrableI_bounded)
have gm_fin: "AE x in M. f x * nn_transfer_operator gm x ≠ ∞"
apply (rule nn_integral_PInf_AE) using a by auto
have "(∫ x. f x * enn2real(nn_transfer_operator gm x) ∂M) = enn2real (∫⇧+ x. f x * enn2real(nn_transfer_operator gm x) ∂M)"
by (rule integral_eq_nn_integral) auto
also have "... = enn2real(∫⇧+ x. ennreal(f (T x) * enn2real(gm x)) ∂M)"
proof -
{
fix x assume "f x * nn_transfer_operator gm x ≠ ∞"
then have "ennreal (f x * enn2real (nn_transfer_operator gm x)) = ennreal (f x) * nn_transfer_operator gm x"
by (auto simp add: ennreal_mult ennreal_mult_eq_top_iff less_top intro!: ennreal_mult_left_cong)
}
then have "AE x in M. ennreal (f x * enn2real (nn_transfer_operator gm x)) = ennreal (f x) * nn_transfer_operator gm x"
using gm_fin by auto
then have "(∫⇧+ x. f x * enn2real(nn_transfer_operator gm x) ∂M) = (∫⇧+ x. f x * nn_transfer_operator gm x ∂M)"
by (rule nn_integral_cong_AE)
also have "... = (∫⇧+ x. f (T x) * gm x ∂M)"
by (rule nn_transfer_operator_intg) (auto simp add: gm_def)
also have "... = (∫⇧+ x. ennreal(f (T x) * enn2real(gm x)) ∂M)"
by (rule nn_integral_cong_AE) (auto simp: ennreal_mult gm_def)
finally have "(∫⇧+ x. f x * enn2real(nn_transfer_operator gm x) ∂M) = (∫⇧+ x. ennreal(f (T x) * enn2real(gm x)) ∂M)" by simp
then show ?thesis by simp
qed
also have "... = (∫ x. f (T x) * enn2real(gm x) ∂M)"
by (rule integral_eq_nn_integral[symmetric]) (auto simp add: gm_def)
finally have gm_expr: "(∫ x. f x * enn2real(nn_transfer_operator gm x) ∂M) = (∫ x. f (T x) * enn2real(gm x) ∂M)" by simp
(* Recombine: subtract the two transferred parts and collapse max(g,0) - max(-g,0) = g. *)
have "(∫ x. f x * real_transfer_operator g x ∂M) = (∫ x. f x * enn2real(nn_transfer_operator gp x) - f x * enn2real(nn_transfer_operator gm x) ∂M)"
unfolding real_transfer_operator_def gp_def gm_def by (simp add: right_diff_distrib)
also have "... = (∫ x. f x * enn2real(nn_transfer_operator gp x) ∂M) - (∫ x. f x * enn2real(nn_transfer_operator gm x) ∂M)"
by (rule Bochner_Integration.integral_diff) (simp_all add: gp_int gm_int)
also have "... = (∫ x. f (T x) * enn2real(gp x) ∂M) - (∫ x. f (T x) * enn2real(gm x) ∂M)"
using gp_expr gm_expr by simp
also have "... = (∫ x. f (T x) * max (g x) 0 ∂M) - (∫ x. f (T x) * max (-g x) 0 ∂M)"
using gp_real gm_real by simp
also have "... = (∫ x. f (T x) * max (g x) 0 - f (T x) * max (-g x) 0 ∂M)"
by (rule Bochner_Integration.integral_diff[symmetric]) (simp_all add: int1 int2)
also have "... = (∫x. f (T x) * g x ∂M)"
by (metis (mono_tags, hide_lams) diff_0 diff_zero eq_iff max.cobounded2 max_def minus_minus neg_le_0_iff_le right_diff_distrib)
finally show "(∫ x. f x * real_transfer_operator g x ∂M) = (∫x. f (T x) * g x ∂M)"
by simp
qed
(* Duality formula for the real transfer operator with an extra integrable factor:
   if x ↦ f (T x) * g x is integrable, then x ↦ f x * real_transfer_operator g x
   is integrable with the same integral.  Proof: split f into its positive part fp
   and negative part fm, apply real_transfer_operator_intg_fpos (the nonnegative
   version, proved above) to each part, then recombine by linearity. *)
lemma real_transfer_operator_intg:
assumes "integrable M (λx. f (T x) * g x)" and
[measurable]: "f ∈ borel_measurable M" "g ∈ borel_measurable M"
shows "integrable M (λx. f x * real_transfer_operator g x)"
"(∫ x. f x * real_transfer_operator g x ∂M) = (∫ x. f (T x) * g x ∂M)"
proof -
(* fp and fm are the nonnegative positive/negative parts of f, with f = fp - fm *)
define fp where "fp = (λx. max (f x) 0)"
define fm where "fm = (λx. max (-f x) 0)"
have [measurable]: "fp ∈ borel_measurable M" "fm ∈ borel_measurable M"
unfolding fp_def fm_def by simp_all
(* fp (T x) * g x is integrable, dominated by |f (T x) * g x| *)
have "(∫⇧+ x. norm(fp (T x) * g x) ∂M) ≤ (∫⇧+ x. norm(f (T x) * g x) ∂M)"
by (simp add: fp_def nn_integral_mono)
also have "... < ∞" using assms(1) by (simp add: integrable_iff_bounded)
finally have "(∫⇧+ x. norm(fp (T x) * g x) ∂M) < ∞" by simp
then have intp: "integrable M (λx. fp (T x) * g x)" by (simp add: integrableI_bounded)
moreover have "⋀x. fp x ≥ 0" unfolding fp_def by simp
(* apply the nonnegative-factor version of the duality formula to fp *)
ultimately have Rp: "integrable M (λx. fp x * real_transfer_operator g x)"
"(∫ x. fp x * real_transfer_operator g x ∂M) = (∫ x. fp (T x) * g x ∂M)"
using real_transfer_operator_intg_fpos by auto
(* same domination and duality argument for the negative part fm *)
have "(∫⇧+ x. norm(fm (T x) * g x) ∂M) ≤ (∫⇧+ x. norm(f (T x) * g x) ∂M)"
by (simp add: fm_def nn_integral_mono)
also have "... < ∞" using assms(1) by (simp add: integrable_iff_bounded)
finally have "(∫⇧+ x. norm(fm (T x) * g x) ∂M) < ∞" by simp
then have intm: "integrable M (λx. fm (T x) * g x)" by (simp add: integrableI_bounded)
moreover have "⋀x. fm x ≥ 0" unfolding fm_def by simp
ultimately have Rm: "integrable M (λx. fm x * real_transfer_operator g x)"
"(∫ x. fm x * real_transfer_operator g x ∂M) = (∫ x. fm (T x) * g x ∂M)"
using real_transfer_operator_intg_fpos by auto
(* recombine: f = fp - fm pointwise, so integrability follows from the two parts *)
have "integrable M (λx. fp x * real_transfer_operator g x - fm x * real_transfer_operator g x)"
using Rp(1) Rm(1) integrable_diff by simp
moreover have *: "⋀x. f x * real_transfer_operator g x = fp x * real_transfer_operator g x - fm x * real_transfer_operator g x"
unfolding fp_def fm_def by (simp add: max_def)
ultimately show "integrable M (λx. f x * real_transfer_operator g x)"
by simp
(* and the integral identity follows by linearity of the Bochner integral *)
have "(∫ x. f x * real_transfer_operator g x ∂M) = (∫ x. fp x * real_transfer_operator g x - fm x * real_transfer_operator g x ∂M)"
using * by simp
also have "... = (∫ x. fp x * real_transfer_operator g x ∂M) - (∫ x. fm x * real_transfer_operator g x ∂M)"
using Rp(1) Rm(1) by simp
also have "... = (∫ x. fp (T x) * g x ∂M) - (∫ x. fm (T x) * g x ∂M)"
using Rp(2) Rm(2) by simp
also have "... = (∫ x. fp (T x) * g x - fm (T x) * g x ∂M)"
using intm intp by simp
also have "... = (∫ x. f (T x) * g x ∂M)"
unfolding fp_def fm_def by (metis (no_types, hide_lams) diff_0 diff_zero max.commute
max_def minus_minus mult.commute neg_le_iff_le right_diff_distrib)
finally show "(∫ x. f x * real_transfer_operator g x ∂M) = (∫ x. f (T x) * g x ∂M)" by simp
qed
(* Specializing the previous lemma to f = 1: the real transfer operator preserves
   integrability and the value of the integral. *)
lemma real_transfer_operator_int [intro]:
assumes "integrable M f"
shows "integrable M (real_transfer_operator f)"
"(∫x. real_transfer_operator f x ∂M) = (∫x. f x ∂M)"
using real_transfer_operator_intg[where ?f = "λx. 1" and ?g = f] assms by auto
(* Characterization of the real transfer operator: if g integrates like f ∘ T
   against the indicator of every measurable set, then g = real_transfer_operator f
   almost everywhere.  Uniqueness is provided by density_unique_real. *)
lemma real_transfer_operator_charact:
assumes "⋀A. A ∈ sets M ⟹ (∫x. indicator A x * g x ∂M) = (∫x. indicator A (T x) * f x ∂M)"
and [measurable]: "integrable M f" "integrable M g"
shows "AE x in M. real_transfer_operator f x = g x"
proof (rule AE_symmetric[OF density_unique_real])
fix A assume [measurable]: "A ∈ sets M"
(* compute the set integral of the transfer operator over A via the duality formula *)
have "set_lebesgue_integral M A (real_transfer_operator f) = (∫x. indicator A x * real_transfer_operator f x ∂M)"
unfolding set_lebesgue_integral_def by auto
also have "... = (∫x. indicator A (T x) * f x ∂M)"
apply (rule real_transfer_operator_intg, auto)
by (rule Bochner_Integration.integrable_bound[of _ "λx. abs(f x)"], auto simp add: assms indicator_def)
(* ...and this equals the set integral of g over A by assumption *)
also have "... = set_lebesgue_integral M A g"
unfolding set_lebesgue_integral_def using assms(1)[OF ‹A ∈ sets M›] by auto
finally show "set_lebesgue_integral M A g = set_lebesgue_integral M A (real_transfer_operator f)"
by simp
qed (auto simp add: assms real_transfer_operator_int)
(* In a measure-preserving system (locale mpt), the transfer operator of f ∘ T
   is f itself almost everywhere: the transfer operator is a left inverse of
   composition with T.  Proved via the characterization lemma, using that T
   preserves integrals. *)
lemma (in mpt) real_transfer_operator_foT:
assumes "integrable M f"
shows "AE x in M. real_transfer_operator (f o T) x = f x"
proof -
have *: "(∫ x. indicator A x * f x ∂M) = (∫x. indicator A (T x) * f (T x) ∂M)" if [measurable]: "A ∈ sets M" for A
apply (subst T_integral_preserving)
using integrable_real_mult_indicator[OF that assms] by (auto simp add: mult.commute)
show ?thesis
apply (rule real_transfer_operator_charact)
using assms * by (auto simp add: comp_def T_integral_preserving)
qed
(* A factor of the form f ∘ T can be pulled out of the transfer operator:
   real_transfer_operator ((f ∘ T) · g) = f · real_transfer_operator g a.e.
   Again proved through the characterization by set integrals. *)
lemma real_transfer_operator_foT_g:
assumes [measurable]: "f ∈ borel_measurable M" "g ∈ borel_measurable M" "integrable M (λx. f (T x) * g x)"
shows "AE x in M. real_transfer_operator (λx. f (T x) * g x) x = f x * real_transfer_operator g x"
proof -
have *: "(∫x. indicator A x * (f x * real_transfer_operator g x) ∂M) = (∫x. indicator A (T x) * (f (T x) * g x) ∂M)"
if [measurable]: "A ∈ sets M" for A
apply (simp add: mult.assoc[symmetric])
apply (subst real_transfer_operator_intg)
apply (rule Bochner_Integration.integrable_bound[of _ "(λx. f (T x) * g x)"])
by (auto simp add: assms indicator_def)
show ?thesis
by (rule real_transfer_operator_charact) (auto simp add: assms * intro!: real_transfer_operator_intg)
qed
(* Additivity of the real transfer operator (almost everywhere), for integrable
   arguments.  Proved via the characterization lemma: the set integrals of the
   sum of the transfer operators match those of (f + g) ∘ T against indicators. *)
lemma real_transfer_operator_add [intro]:
assumes [measurable]: "integrable M f" "integrable M g"
shows "AE x in M. real_transfer_operator (λx. f x + g x) x = real_transfer_operator f x + real_transfer_operator g x"
proof (rule real_transfer_operator_charact)
have "integrable M (real_transfer_operator f)" "integrable M (real_transfer_operator g)"
using real_transfer_operator_int(1) assms by auto
then show "integrable M (λx. real_transfer_operator f x + real_transfer_operator g x)"
by auto
fix A assume [measurable]: "A ∈ sets M"
(* both indicator-weighted integrands are dominated by |f| resp. |g| *)
have intAf: "integrable M (λx. indicator A (T x) * f x)"
apply (rule Bochner_Integration.integrable_bound[OF assms(1)]) unfolding indicator_def by auto
have intAg: "integrable M (λx. indicator A (T x) * g x)"
apply (rule Bochner_Integration.integrable_bound[OF assms(2)]) unfolding indicator_def by auto
(* expand the set integral of the sum, apply the duality formula to each summand,
   and reassemble *)
have "(∫x. indicator A x * (real_transfer_operator f x + real_transfer_operator g x)∂M)
= (∫x. indicator A x * real_transfer_operator f x + indicator A x* real_transfer_operator g x ∂M)"
by (simp add: algebra_simps)
also have "... = (∫x. indicator A x * real_transfer_operator f x ∂M) + (∫x. indicator A x * real_transfer_operator g x ∂M)"
apply (rule Bochner_Integration.integral_add)
using integrable_real_mult_indicator[OF ‹A ∈ sets M› real_transfer_operator_int(1)[OF assms(1)]]
integrable_real_mult_indicator[OF ‹A ∈ sets M› real_transfer_operator_int(1)[OF assms(2)]]
by (auto simp add: mult.commute)
also have "... = (∫x. indicator A (T x) * f x ∂M) + (∫x. indicator A (T x) * g x ∂M)"
using real_transfer_operator_intg(2) assms ‹A ∈ sets M› intAf intAg by auto
also have "... = (∫x. indicator A (T x) * f x + indicator A (T x) * g x ∂M)"
by (rule Bochner_Integration.integral_add[symmetric]) (auto simp add: assms ‹A ∈ sets M› intAf intAg)
also have "... = ∫x. indicator A (T x) * (f x + g x)∂M"
by (simp add: algebra_simps)
finally show "(∫x. indicator A x * (real_transfer_operator f x + real_transfer_operator g x)∂M) = ∫x. indicator A (T x) * (f x + g x)∂M"
by simp
qed (auto simp add: assms)
(* Congruence rule: a.e.-equal functions have a.e.-equal real transfer operators.
   Reduces to the congruence rule of the nonnegative transfer operator, applied
   to the positive and negative parts separately. *)
lemma real_transfer_operator_cong:
assumes ae: "AE x in M. f x = g x" and [measurable]: "f ∈ borel_measurable M" "g ∈ borel_measurable M"
shows "AE x in M. real_transfer_operator f x = real_transfer_operator g x"
proof -
have "AE x in M. nn_transfer_operator (λx. ennreal (f x)) x = nn_transfer_operator (λx. ennreal (g x)) x"
apply (rule nn_transfer_operator_cong) using assms by auto
moreover have "AE x in M. nn_transfer_operator (λx. ennreal (-f x)) x = nn_transfer_operator (λx. ennreal(-g x)) x"
apply (rule nn_transfer_operator_cong) using assms by auto
ultimately show "AE x in M. real_transfer_operator f x = real_transfer_operator g x"
unfolding real_transfer_operator_def by auto
qed
(* Scalar multiples commute with the real transfer operator (a.e.).  This is an
   instance of real_transfer_operator_foT_g with the constant function f = c. *)
lemma real_transfer_operator_cmult [intro, simp]:
fixes c::real
assumes "integrable M f"
shows "AE x in M. real_transfer_operator (λx. c * f x) x = c * real_transfer_operator f x"
by (rule real_transfer_operator_foT_g) (auto simp add: assms borel_measurable_integrable)
(* Division by a constant commutes with the real transfer operator (a.e.).
   Note this also covers c = 0, since x / 0 = 0 in Isabelle's real division. *)
lemma real_transfer_operator_cdiv [intro, simp]:
fixes c::real
assumes "integrable M f"
shows "AE x in M. real_transfer_operator (λx. f x / c) x = real_transfer_operator f x / c"
using real_transfer_operator_cmult[of _ "1/c", OF assms] by (auto simp add: divide_simps)
(* Subtraction commutes with the real transfer operator (a.e.): combine
   additivity with scalar multiplication by -1. *)
lemma real_transfer_operator_diff [intro, simp]:
assumes [measurable]: "integrable M f" "integrable M g"
shows "AE x in M. real_transfer_operator (λx. f x - g x) x = real_transfer_operator f x - real_transfer_operator g x"
proof -
have "AE x in M. real_transfer_operator (λx. f x + (- g x)) x = real_transfer_operator f x + real_transfer_operator (λx. -g x) x"
using real_transfer_operator_add[where ?f = f and ?g = "λx. - g x"] assms by auto
moreover have "AE x in M. real_transfer_operator (λx. -g x) x = - real_transfer_operator g x"
using real_transfer_operator_cmult[where ?f = g and ?c = "-1"] assms(2) by auto
ultimately show ?thesis by auto
qed
(* Positivity: the real transfer operator of an a.e.-nonnegative function is
   a.e. nonnegative.  Replace f by g = max(f, 0), for which the negative-part
   contribution in the definition vanishes identically. *)
lemma real_transfer_operator_pos [intro]:
assumes "AE x in M. f x ≥ 0" and [measurable]: "f ∈ borel_measurable M"
shows "AE x in M. real_transfer_operator f x ≥ 0"
proof -
define g where "g = (λx. max (f x) 0)"
have "AE x in M. f x = g x" using assms g_def by auto
then have *: "AE x in M. real_transfer_operator f x = real_transfer_operator g x" using real_transfer_operator_cong g_def by auto
(* for the everywhere-nonnegative g, ennreal(-g x) = 0, so the subtracted term
   in real_transfer_operator_def is a.e. zero *)
have "⋀x. g x ≥ 0" unfolding g_def by simp
then have "(λx. ennreal(-g x)) = (λx. 0)"
by (simp add: ennreal_neg)
then have "AE x in M. nn_transfer_operator (λx. ennreal(-g x)) x = 0"
using nn_transfer_operator_zero by simp
then have "AE x in M. real_transfer_operator g x = enn2real(nn_transfer_operator (λx. ennreal (g x)) x)"
unfolding real_transfer_operator_def by auto
then have "AE x in M. real_transfer_operator g x ≥ 0" by auto
then show ?thesis using * by auto
qed
(* Monotonicity: f ≤ g a.e. implies hat-T f ≤ hat-T g a.e., obtained by applying
   positivity to the difference g - f. *)
lemma real_transfer_operator_mono:
assumes "AE x in M. f x ≤ g x" and [measurable]: "integrable M f" "integrable M g"
shows "AE x in M. real_transfer_operator f x ≤ real_transfer_operator g x"
proof -
have "AE x in M. real_transfer_operator g x - real_transfer_operator f x = real_transfer_operator (λx. g x - f x) x"
by (rule AE_symmetric[OF real_transfer_operator_diff], auto simp add: assms)
moreover have "AE x in M. real_transfer_operator (λx. g x - f x) x ≥ 0"
by (rule real_transfer_operator_pos, auto simp add: assms(1))
ultimately have "AE x in M. real_transfer_operator g x - real_transfer_operator f x ≥ 0" by auto
then show ?thesis by auto
qed
(* Finite sums commute with the real transfer operator (a.e.).  Proved directly
   through the characterization lemma rather than by induction, exchanging the
   finite sum with the integral on both sides. *)
lemma real_transfer_operator_sum [intro, simp]:
fixes f::"'b ⇒ 'a ⇒ real"
assumes [measurable]: "⋀i. integrable M (f i)"
shows "AE x in M. real_transfer_operator (λx. ∑i∈I. f i x) x = (∑i∈I. real_transfer_operator (f i) x)"
proof (rule real_transfer_operator_charact)
fix A assume [measurable]: "A ∈ sets M"
(* integrability of each indicator-weighted summand, by domination *)
have *: "integrable M (λx. indicator A (T x) * f i x)" for i
apply (rule Bochner_Integration.integrable_bound[of _ "f i"]) by (auto simp add: assms indicator_def)
have **: "integrable M (λx. indicator A x * real_transfer_operator (f i) x)" for i
apply (rule Bochner_Integration.integrable_bound[of _ "real_transfer_operator (f i)"]) by (auto simp add: assms indicator_def)
(* duality formula applied term by term *)
have inti: "(∫x. indicator A (T x) * f i x ∂M) = (∫x. indicator A x * real_transfer_operator (f i) x ∂M)" for i
by (rule real_transfer_operator_intg(2)[symmetric], auto simp add: *)
have "(∫x. indicator A (T x) * (∑i∈I. f i x)∂M) = (∫x. (∑i∈I. indicator A (T x) * f i x)∂M)"
by (simp add: sum_distrib_left)
also have "... = (∑i∈I. (∫x. indicator A (T x) * f i x ∂M))"
by (rule Bochner_Integration.integral_sum, simp add: *)
also have "... = (∑i∈I. (∫x. indicator A x * real_transfer_operator (f i) x ∂M))"
using inti by auto
also have "... = (∫x. (∑i∈I. indicator A x * real_transfer_operator (f i) x)∂M)"
by (rule Bochner_Integration.integral_sum[symmetric], simp add: **)
also have "... = (∫x. indicator A x * (∑i∈I. real_transfer_operator (f i) x)∂M)"
by (simp add: sum_distrib_left)
finally show "(∫x. indicator A x * (∑i∈I. real_transfer_operator (f i) x)∂M) = (∫x. indicator A (T x) * (∑i∈I. f i x)∂M)" by auto
qed (auto simp add: assms real_transfer_operator_int(1)[OF assms(1)])
end
subsection ‹Conservativity in terms of transfer operators›
text ‹Conservativity amounts to the fact that $\sum f(T^n x) = \infty$ for almost every $x$
such that $f(x)>0$, if $f$ is nonnegative (see Lemma \verb+recurrence_series_infinite+).
There is a dual formulation, in terms of transfer
operators, asserting that $\sum \hat T^n f(x) = \infty$ for almost every $x$ such that $f(x)>0$.
It is proved by duality, reducing to the previous statement.›
(* Dual formulation of conservativity: for a nonnegative (ennreal-valued)
   measurable f, the series ∑_n (hat-T^n f)(x) is infinite for a.e. x with
   f x > 0.  Proved by duality from recurrence_series_infinite (the statement
   about ∑_n f(T^n x)). *)
theorem (in conservative) recurrence_series_infinite_transfer_operator:
assumes [measurable]: "f ∈ borel_measurable M"
shows "AE x in M. f x > 0 ⟶ (∑n. (nn_transfer_operator^^n) f x) = ∞"
proof -
(* A is the set where the conclusion must hold *)
define A where "A = {x ∈ space M. f x > 0}"
have [measurable]: "A ∈ sets M"
unfolding A_def by auto
(* Main step: the subset of A on which the series is ≤ K is null for any finite K.
   By contradiction: otherwise one finds a set B of positive finite measure on
   which the series is bounded, while the recurrence series is infinite on B. *)
have K: "emeasure M {x ∈ A. (∑n. (nn_transfer_operator^^n) f x) ≤ K} = 0" if "K < ∞" for K
proof (rule ccontr)
assume "emeasure M {x ∈ A. (∑n. (nn_transfer_operator^^n) f x) ≤ K} ≠ 0"
then have *: "emeasure M {x ∈ A. (∑n. (nn_transfer_operator^^n) f x) ≤ K} > 0"
using not_gr_zero by blast
obtain B where B [measurable]: "B ∈ sets M" "B ⊆ {x ∈ A. (∑n. (nn_transfer_operator^^n) f x) ≤ K}" "emeasure M B < ∞" "emeasure M B > 0"
using approx_with_finite_emeasure[OF _ *] by auto
(* on B, f is positive and by conservativity the orbit visits B infinitely often,
   so the product of the visit series with f is infinite a.e. on B *)
have "f x > 0" if "x ∈ B" for x
using B(2) that unfolding A_def by auto
moreover have "AE x∈B in M. (∑n. indicator B ((T^^n) x)) = (∞::ennreal)"
using recurrence_series_infinite[of "indicator B"] by (auto simp add: indicator_def)
ultimately have PInf: "AE x∈B in M. (∑n. indicator B ((T^^n) x)) * f x = ⊤"
unfolding ennreal_mult_eq_top_iff by fastforce
(* but its integral is finite: bound the transfer-operator series by K on B... *)
have "(∫⇧+x. indicator B x * (∑n. (nn_transfer_operator^^n) f x) ∂M) ≤ (∫⇧+x. indicator B x * K ∂M)"
apply (rule nn_integral_mono) using B(2) unfolding indicator_def by auto
also have "... = K * emeasure M B"
by (simp add: mult.commute nn_integral_cmult_indicator)
also have "... < ∞" using ‹K < ∞› B(3)
using ennreal_mult_eq_top_iff top.not_eq_extremum by auto
finally have *: "(∫⇧+x. indicator B x * (∑n. (nn_transfer_operator^^n) f x) ∂M) < ∞" by auto
(* ...and transfer that bound to the visit series by duality, term by term *)
have "(∫⇧+x. indicator B x * (∑n. (nn_transfer_operator^^n) f x) ∂M)
= (∫⇧+x. (∑n. indicator B x * (nn_transfer_operator^^n) f x) ∂M)"
by auto
also have "... = (∑n. (∫⇧+x. indicator B x * (nn_transfer_operator^^n) f x ∂M))"
by (rule nn_integral_suminf, auto)
also have "... = (∑n. (∫⇧+x. indicator B ((T^^n) x) * f x ∂M))"
using nn_transfer_operator_intTn_g by auto
also have "... = (∫⇧+x. (∑n. indicator B ((T^^n) x) * f x) ∂M)"
by (rule nn_integral_suminf[symmetric], auto)
also have "... = (∫⇧+x. (∑n. indicator B ((T^^n) x)) * f x ∂M)"
by auto
finally have **: "(∫⇧+x. (∑n. indicator B ((T^^n) x)) * f x ∂M) ≠ ∞"
using * by simp
(* finite integral forces the integrand to be finite a.e., contradicting PInf *)
have "AE x in M. (∑n. indicator B ((T^^n) x)) * f x ≠ ∞"
by (rule nn_integral_noteq_infinite[OF _ **], auto)
then have "AE x∈B in M. False"
using PInf by auto
then have "emeasure M B = 0"
by (smt AE_E B(1) Collect_mem_eq Collect_mono_iff dual_order.trans emeasure_eq_0 subsetD sets.sets_into_space)
then show False
using B by auto
qed
(* conclude: the series exceeds every natural number a.e. on A, hence is infinite *)
have L: "{x ∈ A. (∑n. (nn_transfer_operator^^n) f x) ≤ K} ∈ null_sets M" if "K < ∞" for K
using K[OF that] by auto
have P: "AE x in M. x ∈ A ⟶ (∑n. (nn_transfer_operator^^n) f x) ≥ K" if "K < ∞" for K
using AE_not_in[OF L[OF that]] by auto
have "AE x in M. ∀N::nat. (x ∈ A ⟶ (∑n. (nn_transfer_operator^^n) f x) ≥ of_nat N)"
unfolding AE_all_countable by (auto simp add: of_nat_less_top intro!: P)
then have "AE x in M. f x > 0 ⟶ (∀N::nat. (∑n. (nn_transfer_operator^^n) f x) ≥ of_nat N)"
unfolding A_def by auto
then show "AE x in M. 0 < f x ⟶ (∑n. (nn_transfer_operator ^^ n) f x) = ∞"
using ennreal_ge_nat_imp_PInf by auto
qed
end
Theory Normalizing_Sequences
section ‹Normalizing sequences›
theory Normalizing_Sequences
imports Transfer_Operator Asymptotic_Density
begin
text ‹In this file, we prove the main result in~\cite{gouezel_normalizing_sequences}: in a
conservative system, if a renormalized sequence $S_n f/B_n$ converges in distribution towards
a limit which is not a Dirac mass at $0$, then $B_n$ can not grow exponentially fast. We also prove
the easier result that, in a probability preserving system, normalizing sequences grow at most
polynomially.›
subsection ‹Measure of the preimages of disjoint sets.›
text ‹We start with a general result about conservative maps:
If $A_n$ are disjoint sets, and $P$ is a finite mass measure which is absolutely continuous
with respect to $M$, then $T^{-n}A_n$ is most often small: $P(T^{-n} A_n)$ tends to $0$
in Cesaro average. The proof is written in terms of densities and positive transfer operators,
so we first write it in ennreal.›
(* For a conservative map T, disjoint sets A_n, and a finite measure P absolutely
   continuous w.r.t. M, the emeasures P(T^{-i} A_i) tend to 0 in Cesaro average.
   The proof works with the density f of P and the nonnegative transfer operator:
   by conservativity the series ∑ hat-T^i f dominates L·f eventually, uniformly on
   a set U of almost full P-measure (Egorov), which lets one bound the middle range
   of indices; the boundary ranges and the complement of U are controlled by
   epsilon terms. *)
theorem (in conservative) disjoint_sets_emeasure_Cesaro_tendsto_zero:
fixes P::"'a measure" and A::"nat ⇒ 'a set"
assumes [measurable]: "⋀n. A n ∈ sets M"
and "disjoint_family A"
"absolutely_continuous M P" "sets P = sets M"
"emeasure P (space M) ≠ ∞"
shows "(λn. (∑i<n. emeasure P (space M ∩ (T^^i)-`(A i)))/n) ⇢ 0"
proof (rule order_tendstoI)
fix delta::ennreal assume "delta > 0"
(* pick a finite nonzero epsilon with 4 * epsilon < delta *)
have "∃epsilon. epsilon ≠ 0 ∧ epsilon ≠ ∞ ∧ 4 * epsilon < delta"
apply (cases delta)
apply (rule exI[of _ "delta/5"]) using ‹delta>0› apply (auto simp add: ennreal_divide_eq_top_iff ennreal_divide_numeral numeral_mult_ennreal intro!: ennreal_lessI)
apply (rule exI[of _ 1]) by auto
then obtain epsilon where "epsilon ≠ 0" "epsilon ≠ ∞" "4 * epsilon < delta"
by auto
then have "epsilon > 0" using not_gr_zero by blast
(* L is chosen so that P(space M) / L ≤ epsilon *)
define L::ennreal where "L = (1/epsilon) * (1+ emeasure P (space M))"
have "L ≠ ∞"
unfolding L_def using assms(5) divide_ennreal_def ennreal_mult_eq_top_iff ‹epsilon ≠ 0› by auto
have "L ≠ 0"
unfolding L_def using ‹epsilon ≠ ∞› by (simp add: ennreal_divide_eq_top_iff)
have "emeasure P (space M) ≤ epsilon * L" unfolding L_def
using ‹epsilon ≠ 0› ‹epsilon ≠ ∞› ‹emeasure P (space M) ≠ ∞›
apply (cases epsilon)
apply (metis (no_types, lifting) add.commute add.right_neutral add_left_mono ennreal_divide_times infinity_ennreal_def mult.left_neutral mult_divide_eq_ennreal zero_le_one)
by simp
then have "emeasure P (space M) / L ≤ epsilon"
using ‹L ≠ 0› ‹L ≠ ∞› by (metis divide_le_posI_ennreal mult.commute not_gr_zero)
then have "c * (emeasure P (space M)/L) ≤ c * epsilon" for c by (rule mult_left_mono, simp)
text ‹We introduce the density of $P$.›
define f where "f = RN_deriv M P"
have [measurable]: "f ∈ borel_measurable M"
unfolding f_def by auto
have [simp]: "P = density M f"
unfolding f_def apply (rule density_RN_deriv[symmetric]) using assms by auto
have "space P = space M"
by auto
interpret Pc: finite_measure P
apply standard unfolding ‹space P = space M› using assms(5) by auto
(* by conservativity, the partial sums of hat-T^i f eventually dominate L * f
   P-almost everywhere *)
have *: "AE x in P. eventually (λn. (∑i<n. (nn_transfer_operator^^i) f x) > L * f x) sequentially"
proof -
have "AE x in M. f x ≠ ∞"
unfolding f_def apply (intro RN_deriv_finite Pc.sigma_finite_measure)
unfolding ‹space P = space M› using assms by auto
moreover have "AE x in M. f x > 0 ⟶ (∑n. (nn_transfer_operator^^n) f x) = ∞"
using recurrence_series_infinite_transfer_operator by auto
ultimately have "AE x in M. f x > 0 ⟶ ((∑n. (nn_transfer_operator^^n) f x) = ∞ ∧ f x ≠ ∞)"
by auto
then have AEP: "AE x in P. (∑n. (nn_transfer_operator^^n) f x) = ∞ ∧ f x ≠ ∞"
unfolding ‹P = density M f› using AE_density[of f M] by auto
moreover have "eventually (λn. (∑i<n. (nn_transfer_operator^^i) f x) > L * f x) sequentially"
if "(∑n. (nn_transfer_operator^^n) f x) = ∞ ∧ f x ≠ ∞" for x
proof -
have "(λn. (∑i<n. (nn_transfer_operator^^i) f x)) ⇢ (∑i. (nn_transfer_operator^^i) f x)"
by (simp add: summable_LIMSEQ)
moreover have "(∑i. (nn_transfer_operator^^i) f x) > L * f x"
using that ‹L ≠ ∞› by (auto simp add: ennreal_mult_less_top top.not_eq_extremum)
ultimately show ?thesis
by (rule order_tendstoD(1))
qed
ultimately show ?thesis
by auto
qed
(* Egorov: upgrade the a.e.-eventual domination to uniform domination from some
   index N1 on, on a set U with P(complement) < epsilon *)
have "∃U N. U ∈ sets P ∧ (∀n ≥ N. ∀x ∈ U. (∑i<n. (nn_transfer_operator^^i) f x) > L * f x) ∧ emeasure P (space P - U) < epsilon"
apply (rule Pc.Egorov_lemma[OF _ *]) using ‹epsilon≠0› by (auto simp add: zero_less_iff_neq_zero)
then obtain U N1 where [measurable]: "U ∈ sets M" and U: "emeasure P (space M - U) < epsilon"
"⋀n x. n ≥ N1 ⟹ x ∈ U ⟹ L * f x < (∑i<n. (nn_transfer_operator^^i) f x)"
unfolding ‹sets P = sets M› ‹space P = space M› by blast
have "U ⊆ space M" by (rule sets.sets_into_space, simp)
(* K is a fixed index ≥ N1; N is the threshold beyond which the Cesaro average
   is below delta *)
define K where "K = N1 + 1"
have "K ≥ N1" "K ≥ 1" unfolding K_def by auto
have *: "K * emeasure P (space M) / epsilon ≠ ∞"
using ‹emeasure P (space M) ≠ ∞› ‹epsilon ≠ 0› ennreal_divide_eq_top_iff ennreal_mult_eq_top_iff by auto
obtain N2::nat where N2: "N2 ≥ K * emeasure P (space M) / epsilon"
using ennreal_archimedean[OF *] by auto
define N where "N = 2 * K + N2"
have "(∑k∈{..<n}. emeasure P (space M ∩ (T^^k)-`(A k))) / n < delta" if "n ≥ N" for n
proof -
have "n ≥ 2 * K" "of_nat n ≥ ((of_nat N2)::ennreal)" using that unfolding N_def by auto
then have "n ≥ K * emeasure P (space M) / epsilon"
using N2 order_trans by blast
then have "K * emeasure P (space M) ≤ n * epsilon"
using ‹epsilon > 0› ‹epsilon ≠ ∞›
by (smt divide_ennreal_def divide_right_mono_ennreal ennreal_mult_divide_eq ennreal_mult_eq_top_iff infinity_ennreal_def mult.commute not_le order_le_less)
have "n ≥ 1" using ‹n ≥ 2 * K› ‹K ≥ 1› by auto
(* shift lemma: the visits in the middle range {K..<n-K} are dominated by the
   shifted visits for every offset j < K *)
have *: "((∑k∈{K..<n-K}. indicator (A k) ((T^^k) x))::ennreal) ≤ (∑i∈{K..<n}. indicator (A (i-j)) ((T^^(i-j)) x))"
if "j < K" for j x
proof -
have "(∑k ∈ {K..<n-K}. indicator (A k) ((T^^k) x)) ≤ ((∑k∈{K-j..<n-j}. indicator (A k) ((T^^k) x))::ennreal)"
apply (rule sum_mono2) using ‹j < K› by auto
also have "... = (∑i∈{K..<n}. indicator (A (i-j)) ((T^^(i-j)) x))"
apply (rule sum.reindex_bij_betw[symmetric], rule bij_betw_byWitness[of _ "λx. x+j"]) using ‹j < K› by auto
finally show ?thesis by simp
qed
(* key estimate: L times the middle-range sum is bounded by n * P(space M),
   by trading L * f against the partial sums of hat-T^j f on U (Egorov bound),
   pushing the transfer operators back to compositions with T^j by duality,
   and using disjointness of the A_k to bound the indicator sum by 1 *)
have "L * (∑ k ∈ {K..<n-K}. emeasure P (U ∩ (T^^k)-`(A k))) = L * (∑ k ∈ {K..<n-K}.(∫⇧+x. indicator (U ∩ (T^^k)-`(A k)) x ∂P))"
by auto
also have "... = (∑ k ∈ {K..<n-K}. (∫⇧+x. L * indicator (U ∩ (T^^k)-`(A k)) x ∂P))"
unfolding sum_distrib_left by (intro sum.cong nn_integral_cmult[symmetric], auto)
also have "... = (∑ k ∈ {K..<n-K}. (∫⇧+x. f x * (L * indicator (U ∩ (T^^k)-`(A k)) x) ∂M))"
unfolding ‹P = density M f› by (intro sum.cong nn_integral_density, auto)
also have "... = (∑ k ∈ {K..<n-K}. (∫⇧+x. f x * L * indicator U x * indicator (A k) ((T^^k) x) ∂M))"
by (intro sum.cong nn_integral_cong, auto simp add: algebra_simps indicator_def)
also have "... ≤ (∑ k ∈ {K..<n-K}. (∫⇧+x. (∑j ∈ {..<K}. (nn_transfer_operator^^j) f x) * indicator (A k) ((T^^k) x) ∂M))"
apply (intro sum_mono nn_integral_mono)
using U(2)[OF ‹K ≥ N1›] unfolding indicator_def using less_imp_le by (auto simp add: algebra_simps)
also have "... = (∫⇧+x. (∑k∈{K..<n-K}. (∑j ∈ {..<K}. (nn_transfer_operator^^j) f x * indicator (A k) ((T^^k) x))) ∂M)"
apply (subst nn_integral_sum, simp) unfolding sum_distrib_right by auto
also have "... = (∫⇧+x. (∑j ∈ {..<K}. (∑k∈{K..<n-K}. (nn_transfer_operator^^j) f x * indicator (A k) ((T^^k) x))) ∂M)"
by (rule nn_integral_cong, rule sum.swap)
also have "... = (∑j ∈ {..<K}. (∫⇧+x. (nn_transfer_operator^^j) f x * (∑k∈{K..<n-K}. indicator (A k) ((T^^k) x)) ∂M))"
apply (subst nn_integral_sum, simp) unfolding sum_distrib_left by auto
also have "... ≤ (∑j ∈ {..<K}. (∫⇧+x. (nn_transfer_operator^^j) f x * (∑i∈{K..<n}. indicator (A (i-j)) ((T^^(i-j)) x)) ∂M))"
apply (rule sum_mono, rule nn_integral_mono) using * by (auto simp add: mult_left_mono)
also have "... = (∑i∈{K..<n}. (∑j ∈ {..<K}. (∫⇧+x. (nn_transfer_operator^^j) f x * indicator (A (i-j)) ((T^^(i-j)) x) ∂M)))"
unfolding sum_distrib_left using sum.swap by (subst nn_integral_sum, auto)
also have "... = (∑i∈{K..<n}. (∑j ∈ {..<K}. (∫⇧+x. f x * indicator (A (i-j)) ((T^^(i-j)) ((T^^j) x)) ∂M)))"
by (subst nn_transfer_operator_intg_Tn, auto)
also have "... = (∑i∈{K..<n}. (∫⇧+x. f x * (∑j ∈ {..<K}. indicator (A (i-j)) ((T^^(i-j)) ((T^^j) x))) ∂M))"
unfolding sum_distrib_left by (subst nn_integral_sum, auto)
also have "... = (∑i∈{K..<n}. (∫⇧+x. (∑j ∈ {..<K}. indicator (A (i-j)) ((T^^((i-j)+j)) x)) ∂P))"
unfolding ‹P = density M f› funpow_add comp_def apply (rule sum.cong, simp) by (rule nn_integral_density[symmetric], auto)
also have "... = (∑i∈{K..<n}. (∫⇧+x. (∑j ∈ {..<K}. indicator (A (i-j)) ((T^^i) x)) ∂P))"
by auto
also have "... ≤ (∑i∈{K..<n}. (∫⇧+x. (1::ennreal) ∂P))"
apply (rule sum_mono) apply (rule nn_integral_mono) apply (rule disjoint_family_indicator_le_1)
using assms(2) apply (auto simp add: disjoint_family_on_def)
by (metis Int_iff diff_diff_cancel equals0D le_less le_trans)
also have "... ≤ n * emeasure P (space M)"
using assms(4) by (auto intro!: mult_right_mono)
finally have *: "L * (∑ k ∈ {K..<n-K}. emeasure P (U ∩ (T^^k)-`(A k))) ≤ n * emeasure P (space M)"
by simp
have Ineq: "(∑ k ∈ {K..<n-K}. emeasure P (U ∩ (T^^k)-`(A k))) ≤ n * emeasure P (space M) / L"
using divide_right_mono_ennreal[OF *, of L] ‹L ≠ 0›
by (metis (no_types, lifting) ‹L ≠ ∞› ennreal_mult_divide_eq infinity_ennreal_def mult.commute)
(* split {..<n} into the two boundary ranges and the middle range, and bound
   each contribution: boundary ranges by K * P(space M), the middle range by
   Ineq, and the complement of U by epsilon per term *)
have I: "{..<K} ∪ {K..<n-K} ∪ {n-K..<n} = {..<n}" using ‹n ≥ 2 * K› by auto
have "(∑k∈{..<n}. emeasure P (space M ∩ (T^^k)-`(A k))) ≤ (∑k∈{..<n}. emeasure P (U ∩ (T^^k)-`(A k)) + epsilon)"
proof (rule sum_mono)
fix k
have "emeasure P (space M ∩ (T^^k)-`(A k)) ≤ emeasure P ((U ∩ (T^^k)-`(A k)) ∪ (space M - U))"
by (rule emeasure_mono, auto)
also have "... ≤ emeasure P (U ∩ (T^^k)-`(A k)) + emeasure P (space M - U)"
by (rule emeasure_subadditive, auto)
also have "... ≤ emeasure P (U ∩ (T^^k)-`(A k)) + epsilon"
using U(1) by auto
finally show "emeasure P (space M ∩ (T ^^ k) -` A k) ≤ emeasure P (U ∩ (T ^^ k) -` A k) + epsilon"
by simp
qed
also have "... = (∑k∈{..<K} ∪ {K..<n-K} ∪ {n-K..<n}. emeasure P (U ∩ (T^^k)-`(A k))) + (∑k∈{..<n}. epsilon)"
unfolding sum.distrib I by simp
also have "... = (∑k∈{..<K}. emeasure P (U ∩ (T^^k)-`(A k))) + (∑k∈{K..<n-K}. emeasure P (U ∩ (T^^k)-`(A k)))
+ (∑k∈{n-K..<n}. emeasure P (U ∩ (T^^k)-`(A k))) + n * epsilon"
apply (subst sum.union_disjoint) apply simp apply simp using ‹n ≥ 2 * K›
apply (simp add: ivl_disj_int_one(2) ivl_disj_un_one(2))
by (subst sum.union_disjoint, auto)
also have "... ≤ (∑k∈{..<K}. emeasure P (space M)) + n * emeasure P (space M) / L + (∑k∈{n-K..<n}. emeasure P (space M)) + n * epsilon"
apply (intro add_mono sum_mono Ineq emeasure_mono) using ‹U ⊆ space M› by auto
also have "... = K * emeasure P (space M) + n * emeasure P (space M)/L + K * emeasure P (space M) + n * epsilon"
using ‹n ≥ 2 * K› by auto
also have "... ≤ n * epsilon + n * epsilon + n * epsilon + n * epsilon"
apply (intro add_mono)
using ‹K * emeasure P (space M) ≤ n * epsilon› ‹of_nat n * (emeasure P (space M)/L) ≤ of_nat n * epsilon›
ennreal_times_divide by auto
also have "... = n * (4 * epsilon)"
by (metis (no_types, lifting) add.assoc distrib_right mult.left_commute mult_2 numeral_Bit0)
also have "... < n * delta"
using ‹4 * epsilon < delta› ‹n ≥ 1›
by (simp add: ennreal_mult_strict_left_mono ennreal_of_nat_eq_real_of_nat)
finally show ?thesis
apply (subst divide_less_ennreal)
using ‹n ≥ 1› of_nat_less_top by (auto simp add: mult.commute)
qed
then show "eventually (λn. (∑k∈{..<n}. emeasure P (space M ∩ (T^^k)-`(A k))) / n < delta) sequentially"
unfolding eventually_sequentially by auto
qed (simp)
text ‹We state the previous theorem using measures instead of emeasures. This is clearly
equivalent, but one has to play with ennreal carefully to prove it.›
(* Same Cesaro statement as disjoint_sets_emeasure_Cesaro_tendsto_zero, phrased
   with real-valued measures instead of emeasures.  The content is the transfer
   between real and ennreal limits; all analysis is in the emeasure version. *)
theorem (in conservative) disjoint_sets_measure_Cesaro_tendsto_zero:
fixes P::"'a measure" and A::"nat ⇒ 'a set"
assumes [measurable]: "⋀n. A n ∈ sets M"
and "disjoint_family A"
"absolutely_continuous M P" "sets P = sets M"
"emeasure P (space M) ≠ ∞"
shows "(λn. (∑i<n. measure P (space M ∩ (T^^i)-`(A i)))/n) ⇢ 0"
proof -
have "space P = space M"
using assms(4) sets_eq_imp_space_eq by blast
moreover have "emeasure P Q ≤ emeasure P (space P)" for Q
by (simp add: emeasure_space)
(* P is finite, so all emeasures are finite and measure/emeasure agree *)
ultimately have [simp]: "emeasure P Q ≠ ⊤" for Q
using ‹emeasure P (space M) ≠ ∞› neq_top_trans by auto
(* for n > 0, the real Cesaro average coincides with the ennreal one *)
have *: "ennreal ((∑i<n. measure P (space M ∩ (T^^i)-`(A i)))/n) = (∑i<n. emeasure P (space M ∩ (T^^i)-`(A i)))/n" if "n > 0" for n
apply (subst divide_ennreal[symmetric])
apply (auto intro!: sum_nonneg that simp add: ennreal_of_nat_eq_real_of_nat[symmetric])
apply(subst sum_ennreal[symmetric], simp)
apply (subst emeasure_eq_ennreal_measure) by auto
have "eventually (λn. ennreal ((∑i<n. measure P (space M ∩ (T^^i)-`(A i)))/n) = (∑i<n. emeasure P (space M ∩ (T^^i)-`(A i)))/n) sequentially"
unfolding eventually_sequentially apply (rule exI[of _ 1]) using * by auto
then have *: "(λn. ennreal ((∑i<n. measure P (space M ∩ (T^^i)-`(A i)))/n)) ⇢ ennreal 0"
using disjoint_sets_emeasure_Cesaro_tendsto_zero[OF assms] tendsto_cong by force
show ?thesis
apply (subst tendsto_ennreal_iff[symmetric]) using * apply auto
unfolding eventually_sequentially apply (rule exI[of _ 1])
by (auto simp add: divide_simps intro!: sum_nonneg)
qed
text ‹As convergence to $0$ in Cesaro mean is equivalent to convergence to $0$ along a density
one sequence, we obtain the equivalent formulation of the previous theorem.›
(* Reformulation: convergence to 0 in Cesaro mean is equivalent to convergence
   to 0 along a set of indices of lower asymptotic density one, so the previous
   theorem yields such a density-one set B. *)
theorem (in conservative) disjoint_sets_measure_density_one_tendsto_zero:
fixes P::"'a measure" and A::"nat ⇒ 'a set"
assumes [measurable]: "⋀n. A n ∈ sets M"
and "disjoint_family A"
"absolutely_continuous M P" "sets P = sets M"
"emeasure P (space M) ≠ ∞"
shows "∃B. lower_asymptotic_density B = 1 ∧ (λn. measure P (space M ∩ (T^^n)-`(A n)) * indicator B n) ⇢ 0"
by (rule cesaro_imp_density_one[OF _ disjoint_sets_measure_Cesaro_tendsto_zero[OF assms]], simp)
subsection ‹Normalizing sequences do not grow exponentially in conservative systems›
text ‹We prove the main result in~\cite{gouezel_normalizing_sequences}: in a
conservative system, if a renormalized sequence $S_n f/B_n$ converges in distribution towards
a limit which is not a Dirac mass at $0$, then $B_n$ cannot grow exponentially fast. The proof
is expressed in the following locale. The main theorem is Theorem~\verb+subexponential_growth+
below. To prove it, we need several preliminary estimates.›
text ‹We will use the fact that a real random variables which is not the Dirac mass at $0$
gives positive mass to a set separated away from $0$.›
(* A real distribution that is not the Dirac mass at 0 gives positive probability
   to {|x| > a} for some a > 0.  Cover the complement of {0} by the countable
   union of {|x| > (1/2)^n} and pick an n with positive mass. *)
lemma (in real_distribution) not_Dirac_0_imp_positive_mass_away_0:
assumes "prob {0} < 1"
shows "∃a. a > 0 ∧ prob {x. abs(x) > a} > 0"
proof -
have "1 = prob UNIV"
using prob_space by auto
also have "... = prob {0} + prob (UNIV -{0})"
by (subst finite_measure_Union[symmetric], auto)
finally have "0 < prob (UNIV -{0})"
using assms by auto
also have "... ≤ prob (⋃n::nat. {x. abs(x)>(1/2)^n})"
apply (rule finite_measure_mono)
by (auto, meson one_less_numeral_iff reals_power_lt_ex semiring_norm(76) zero_less_abs_iff)
finally have "prob (⋃n::nat. {x. abs(x)>(1/2)^n}) ≠ 0"
by simp
(* countable additivity: some term of the union already has positive mass *)
then have "∃n. prob {x. abs(x)>(1/2)^n} ≠ 0"
using measure_countably_zero[of "λn. {x. abs(x)>(1/2)^n}"] by force
then obtain N where N: "prob {x. abs(x) > (1/2)^N} ≠ 0"
by blast
show ?thesis
apply (rule exI[of _ "(1/2)^N"]) using N by (auto simp add: zero_less_measure_iff)
qed
(* Locale for the main theorem: a conservative system (M, T), a probability
   measure P absolutely continuous w.r.t. M, and a real limit distribution Z
   which is not concentrated at 0, such that (g + S_n f)/B_n under P converges
   in distribution to Z, with all B n > 0. *)
locale conservative_limit =
conservative M + PS: prob_space P + PZ: real_distribution Z
for M::"'a measure" and P::"'a measure" and Z::"real measure" +
fixes f g::"'a ⇒ real" and B::"nat ⇒ real"
assumes PabsM: "absolutely_continuous M P"
and Bpos: "⋀n. B n > 0"
and M [measurable]: "f ∈ borel_measurable M" "g ∈ borel_measurable M" "sets P = sets M"
and non_trivial: "PZ.prob {0} < 1"
and conv: "weak_conv_m (λn. distr P borel (λx. (g x + birkhoff_sum f n x) / B n)) Z"
begin
text ‹For measurability statements, we want every question about $Z$ or $P$ to reduce to a
question about Borel sets of $M$. We add in the next lemma all the statements that are needed
so that this happens automatically.›
(* Bridge facts so that measurability/sets questions about P reduce automatically
   (via simp and measurable_cong) to questions about M. *)
lemma PSZ [simp, measurable_cong]:
"space P = space M"
"h ∈ borel_measurable P ⟷ h ∈ borel_measurable M"
"A ∈ sets P ⟷ A ∈ sets M"
using M sets_eq_imp_space_eq real_distribution_def by auto
text ‹The first nontrivial upper bound is the following lemma, asserting that $B_{n+1}$ can not
be much larger than $\max B_i$ for $i \leq n$. This is proved by saying that $S_{n+1} f = f +
(S_n f) \circ T$, and we know that $S_n f$ is not too large on a set of very large measure, so
the same goes for $(S_n f) \circ T$ by a non-singularity argument. Except that the measure $P$
does not have to be nonsingular for the map $T$, so one has to tweak a little bit this idea,
using transfer operators and conservativity. This is easier to do when the density of $P$ is
bounded by $1$, so we first give the proof under this assumption, and then we reduce to this
case by replacing $M$ with $M+P$ in the second lemma below.›
text ‹First, let us prove the lemma assuming that the density $h$ of $P$ is bounded by $1$.›
lemma upper_bound_C_aux:
assumes "P = density M h" "⋀x. h x ≤ 1"
and [measurable]: "h ∈ borel_measurable M"
shows "∃C≥1. ∀n. B (Suc n) ≤ C * Max {B i|i. i ≤ n}"
proof -
obtain a0 where a0: "a0 > 0" "PZ.prob {x. abs(x) > a0} > 0"
using PZ.not_Dirac_0_imp_positive_mass_away_0[OF non_trivial] by blast
define a where "a = a0/2"
have "a > 0" using ‹a0 > 0› unfolding a_def by auto
define alpha where "alpha = PZ.prob {x. abs (x) > a0}/4"
have "alpha > 0" unfolding alpha_def using a0 by auto
have "PZ.prob {x. abs (x) > 2 * a} > 3 * alpha"
using a0 unfolding a_def alpha_def by auto
text ‹First step: choose $K$ such that, with probability $1-\alpha$, one has
$\sum_{1 \leq k < K} h(T^k x) \geq 1$. This follows directly from conservativity.›
have "∃K. K ≥ 1 ∧ PS.prob {x ∈ space M. (∑i∈{1..<K}. h ((T^^i) x)) ≥ 1} ≥ 1 - alpha"
proof -
have *: "AE x in P. eventually (λn. (∑i<n. h ((T^^i) x)) > 2) sequentially"
proof -
have "AE x in M. h x > 0 ⟶ (∑i. h ((T^^i) x)) = ∞"
using recurrence_series_infinite by auto
then have AEP: "AE x in P. (∑i. h ((T^^i) x)) = ∞"
unfolding ‹P = density M h› using AE_density[of h M] by auto
moreover have "eventually (λn. (∑i<n. h ((T^^i) x)) > 2) sequentially"
if "(∑i. h ((T^^i) x)) = ∞" for x
proof -
have "(λn. (∑i<n. h ((T^^i) x))) ⇢ (∑i. h ((T^^i) x))"
by (simp add: summable_LIMSEQ)
moreover have "(∑i. h ((T^^i) x)) > 2"
using that by auto
ultimately show ?thesis
by (rule order_tendstoD(1))
qed
ultimately show ?thesis
by auto
qed
have "∃U N. U ∈ sets P ∧ (∀n ≥ N. ∀x ∈ U. (∑i<n. h ((T^^i) x)) > 2) ∧ emeasure P (space P - U) < alpha"
apply (rule PS.Egorov_lemma)
apply measurable using M(3) measurable_ident_sets apply blast
using * ‹alpha > 0› by auto
then obtain U N1 where [measurable]: "U ∈ sets M" and U: "emeasure P (space M - U) < alpha"
"⋀n x. n ≥ N1 ⟹ x ∈ U ⟹ 2 < (∑i<n. h ((T^^i) x))"
by auto
have "U ⊆ space M" by (rule sets.sets_into_space, simp)
define K where "K = N1+1"
then have "K ≥ 1" by auto
have Ux: "(∑i∈{1..<K}. h ((T^^i) x)) ≥ 1" if "x ∈ U" for x
proof -
have *: "1 < t" if "2 < 1 + t" for t::ennreal
apply (cases t) using that apply auto
by (metis ennreal_add_left_cancel_less ennreal_less_iff ennreal_numeral le_numeral_extra(1) numeral_One one_add_one)
have "2 < (∑i ∈ {..<K}. h ((T^^i) x))"
apply (rule U(2)) unfolding K_def using that by auto
also have "... = (∑i ∈ {0}. h ((T^^i) x)) + (∑i ∈ {1..<K}. h ((T^^i) x))"
apply (subst sum.union_disjoint[symmetric]) apply simp apply simp apply simp
apply (rule sum.cong) using ‹K ≥ 1› by auto
also have "... = h x + (∑i ∈ {1..<K}. h ((T^^i) x))"
by auto
also have "... ≤ 1 + (∑i ∈ {1..<K}. h ((T^^i) x))"
using assms by auto
finally show ?thesis using less_imp_le[OF *] by auto
qed
have "PS.prob {x ∈ space M. (∑i∈{1..<K}. h ((T^^i) x)) ≥ 1} ≥ 1 - alpha"
proof -
have "PS.prob (space P - U) < alpha"
using U(1) by (simp add: PS.emeasure_eq_measure ennreal_less_iff)
then have "1 - alpha < PS.prob U"
using PS.prob_compl by auto
also have "... ≤ PS.prob {x ∈ space M. (∑i∈{1..<K}. h ((T^^i) x)) ≥ 1}"
apply (rule PS.finite_measure_mono) using Ux sets.sets_into_space[OF ‹U ∈ sets M›] by auto
finally show ?thesis by simp
qed
then show ?thesis using ‹K ≥ 1› by auto
qed
then obtain K where K: "K ≥ 1" "PS.prob {x ∈ space M. (∑i∈{1..<K}. h ((T^^i) x)) ≥ 1} ≥ 1 - alpha"
by blast
text ‹Second step: obtain $D$ which controls the tails of the $K$ first Birkhoff sums of $f$.›
have "∃D. PS.prob {x ∈ space M. ∀k < K. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≤ D} ≥ 1 - alpha"
proof -
have D: "∃D. PS.prob {x ∈ space P. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≥ D} < alpha/K ∧ D ≥ 1" for k
apply (rule PS.random_variable_small_tails) using ‹K ≥ 1› ‹alpha > 0› by auto
have "∃D. ∀k. PS.prob {x ∈ space P. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≥ D k} < alpha/K ∧ D k ≥ 1"
apply (rule choice) using D by auto
then obtain D where D: "⋀k. PS.prob {x ∈ space P. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≥ D k} < alpha/K"
by blast
define D0 where "D0 = Max (D`{..K})"
have "PS.prob {x ∈ space M. ∀k < K. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≤ D0} ≥ 1 - alpha"
proof -
have D1: "PS.prob {x ∈ space M. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≥ D0} < alpha/K" if "k ≤ K" for k
proof -
have "D k ≤ D0"
unfolding D0_def apply (rule Max_ge) using that by auto
have "PS.prob {x ∈ space M. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≥ D0}
≤ PS.prob {x ∈ space P. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≥ D k}"
apply (rule PS.finite_measure_mono) using ‹D k ≤ D0› by auto
then show ?thesis using D[of k] by auto
qed
have "PS.prob (⋃k∈ {..<K}. {x ∈ space M. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≥ D0}) ≤
(∑k ∈ {..<K}. PS.prob {x ∈ space M. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≥ D0})"
by (rule PS.finite_measure_subadditive_finite, auto)
also have "... ≤ (∑k ∈ {..<K}. alpha/K)"
apply (rule sum_mono) using less_imp_le[OF D1] by auto
also have "... = alpha"
using ‹K ≥ 1› by auto
finally have "PS.prob (⋃k∈ {..<K}. {x ∈ space M. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≥ D0}) ≤ alpha"
by simp
then have "1 - alpha ≤ 1 - PS.prob (⋃k∈ {..<K}. {x ∈ space M. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≥ D0})"
by simp
also have "... = PS.prob (space P - (⋃k∈ {..<K}. {x ∈ space M. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≥ D0}))"
by (rule PS.prob_compl[symmetric], auto)
also have "... ≤ PS.prob {x ∈ space M. ∀k < K. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≤ D0}"
by (rule PS.finite_measure_mono, auto)
finally show ?thesis by simp
qed
then show ?thesis by blast
qed
then obtain D where D: "PS.prob {x ∈ space M. ∀k < K. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≤ D} ≥ 1 - alpha"
by blast
text ‹Third step: obtain $\epsilon$ small enough so that, for any set $U$ with probability less
than $\epsilon$ and for any $k\leq K$, one has $\int_U \hat T^k h < \delta$, where $\delta$ is
very small.›
define delta where "delta = alpha/(2 * K)"
then have "delta > 0" using ‹alpha > 0› ‹K ≥ 1› by auto
have "∃epsilon > (0::real). ∀U ∈ sets P. ∀k ≤ K. emeasure P U < epsilon ⟶ (∫⇧+x∈U. ((nn_transfer_operator^^k) h) x ∂P) ≤ delta"
proof -
have *: "∃epsilon>(0::real). ∀U∈sets P. emeasure P U < epsilon ⟶ (∫⇧+x∈U. ((nn_transfer_operator^^k) h) x ∂P) < delta"
for k
proof (rule small_nn_integral_on_small_sets[OF _ ‹0 < delta›])
have "(∫⇧+x. ((nn_transfer_operator^^k) h) x ∂P) = (∫⇧+x. h x * ((nn_transfer_operator^^k) h) x ∂M)"
unfolding ‹P = density M h› by (rule nn_integral_density, auto)
also have "... ≤ (∫⇧+x. 1 * ((nn_transfer_operator^^k) h) x ∂M)"
apply (intro nn_integral_mono mult_right_mono) using assms(2) by auto
also have "... = (∫⇧+x. 1 * h x ∂M)"
by (rule nn_transfer_operator_intTn_g, auto)
also have "... = emeasure P (space M)"
using PS.emeasure_space_1 by (simp add: emeasure_density ‹P = density M h›)
also have "... < ∞"
using PS.emeasure_space_1 by simp
finally show "(∫⇧+x. ((nn_transfer_operator^^k) h) x ∂P) ≠ ∞"
by auto
qed (simp)
have "∃epsilon. ∀k. epsilon k > (0::real) ∧ (∀U∈sets P. emeasure P U < epsilon k ⟶ (∫⇧+x∈U. ((nn_transfer_operator^^k) h) x ∂P) < delta)"
apply (rule choice) using * by blast
then obtain epsilon::"nat ⇒ real" where E: "⋀k. epsilon k > 0"
"⋀k U. U ∈ sets P ⟹ emeasure P U < epsilon k ⟹ (∫⇧+x∈U. ((nn_transfer_operator^^k) h) x ∂P) < delta"
by blast
define epsilon0 where "epsilon0 = Min (epsilon`{..K})"
have "epsilon0 ∈ epsilon`{..K}" unfolding epsilon0_def by (rule Min_in, auto)
then have "epsilon0 > 0" using E(1) by auto
have small_setint: "(∫⇧+x∈U. ((nn_transfer_operator^^k) h) x ∂P) ≤ delta"
if "k ≤ K" "U ∈ sets P" "emeasure P U < epsilon0" for k U
proof -
have *: "epsilon0 ≤ epsilon k"
unfolding epsilon0_def apply (rule Min_le) using ‹k ≤ K› by auto
show ?thesis
apply (rule less_imp_le[OF E(2)[OF ‹U ∈ sets P›]])
using ennreal_leI[OF *] ‹emeasure P U < epsilon0› by auto
qed
then show ?thesis using ‹epsilon0 > 0› by auto
qed
then obtain epsilon::real where "epsilon > 0" and
small_setint: "⋀k U. k ≤ K ⟹ U ∈ sets P ⟹ emeasure P U < epsilon ⟹ (∫⇧+x∈U. ((nn_transfer_operator^^k) h) x ∂P) ≤ delta"
by blast
text ‹Fourth step: obtain an index after which the convergence in distribution ensures that
the probability to be larger than $2 a$ and to be very large is comparable for $(g+S_n f)/B_n$
and for $Z$.›
obtain C0 where "PZ.prob {x. abs(x) ≥ C0} < epsilon" "C0 ≥ 1"
using PZ.random_variable_small_tails[OF ‹epsilon > 0›, of "λx. x"] by auto
have A: "eventually (λn. measure (distr P borel (λx. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) > 2 * a} > 3 * alpha) sequentially"
apply (rule open_set_weak_conv_lsc[of _ Z])
by (auto simp add: PZ.real_distribution_axioms conv ‹PZ.prob {x. abs (x) > 2 * a} > 3 * alpha›)
have B: "eventually (λn. measure (distr P borel (λx. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) ≥ C0} < epsilon) sequentially"
apply (rule closed_set_weak_conv_usc[of _ Z])
by (auto simp add: PZ.real_distribution_axioms conv ‹PZ.prob {x. abs(x) ≥ C0} < epsilon›)
obtain N where N: "⋀n. n ≥ N ⟹ measure (distr P borel (λx. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) > 2 * a} > 3 * alpha"
"⋀n. n ≥ N ⟹ measure (distr P borel (λx. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) ≥ C0} < epsilon"
using eventually_conj[OF A B] unfolding eventually_sequentially by blast
text ‹Fifth step: obtain a trivial control on $B_n$ for $n$ smaller than $N$.›
define C1 where "C1 = Max {B k/B 0 |k. k ≤ N+K+1}"
define C where "C = max (max C0 C1) (max (D / (a * B 0)) (C0/a))"
have "C ≥ 1" unfolding C_def using ‹C0 ≥ 1› by auto
text ‹Now, we can put everything together. If $n$ is large enough, we prove that
$B_{n+1} \leq C \max_{i\leq n} B_i$, by contradiction.›
have geK: "B (Suc n) ≤ C * Max {B i |i. i ≤ n}" if "n > N + K" for n
proof (rule ccontr)
have "Suc n ≥ N" using that by auto
let ?h = "(λx. (g x + birkhoff_sum f (Suc n) x) / B (Suc n))"
have "measure (distr P borel ?h) {x. abs (x) > 2 * a}
= measure P (?h-` {x. abs (x) > 2 * a} ∩ space P)"
by (rule measure_distr, auto)
also have "... = measure P {x ∈ space M. abs(?h x) > 2 * a}"
by (rule HOL.cong[of "measure P"], auto)
finally have A: "PS.prob {x ∈ space M. abs(?h x) > 2 * a} > 3 * alpha"
using N(1)[OF ‹Suc n ≥ N›] by auto
have *: "PS.prob {y ∈ space M. C0 ≤ ¦g y + birkhoff_sum f (Suc n - k) y¦ / ¦B (Suc n - k)¦} < epsilon"
if "k ∈ {1..<K}" for k
proof -
have "Suc n - k ≥ N" using that ‹n > N + K› by auto
let ?h = "(λx. (g x + birkhoff_sum f (Suc n-k) x) / B (Suc n-k))"
have "measure (distr P borel ?h) {x. abs (x) ≥ C0}
= measure P (?h-` {x. abs (x) ≥ C0} ∩ space P)"
by (rule measure_distr, auto)
also have "... = measure P {x ∈ space M. abs(?h x) ≥ C0}"
by (rule HOL.cong[of "measure P"], auto)
finally show ?thesis
using N(2)[OF ‹Suc n - k ≥ N›] by auto
qed
have P_le_epsilon: "emeasure P {y ∈ space M. C0 ≤ ¦g y + birkhoff_sum f (Suc n - k) y¦ / ¦B (Suc n - k)¦} < ennreal epsilon"
if "k ∈ {1..<K}" for k
using *[OF that] ‹epsilon > 0› ennreal_lessI unfolding PS.emeasure_eq_measure by auto
assume "¬(B (Suc n) ≤ C * Max {B i |i. i ≤ n})"
then have "C * Max {B i |i. i ≤ n} ≤ B (Suc n)" by simp
moreover have "C * B 0 ≤ C * Max {B i |i. i ≤ n}"
apply (rule mult_left_mono, rule Max_ge) using ‹C ≥ 1› by auto
ultimately have "C * B 0 ≤ B (Suc n)"
by auto
have "(D / (a * B 0)) * B 0 ≤ C * B 0"
apply (rule mult_right_mono) unfolding C_def using Bpos[of 0] by auto
then have "(D / (a * B 0)) * B 0 ≤ B (Suc n)"
using ‹C * B 0 ≤ B (Suc n)› by simp
then have "D ≤ a * B (Suc n)"
using Bpos[of 0] ‹a > 0› by (auto simp add: divide_simps algebra_simps)
define X where "X = {x ∈ space M. abs((g x + birkhoff_sum f (Suc n) x)/B(Suc n)) > 2 * a}
∩ {x ∈ space M. ∀k < K. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≤ D}
∩ {x ∈ space M. (∑i∈{1..<K}. h ((T^^i) x)) ≥ 1}"
have [measurable]: "X ∈ sets M" unfolding X_def by auto
have "3 * alpha + (1-alpha) + (1-alpha) ≤
PS.prob {x ∈ space M. abs((g x + birkhoff_sum f (Suc n) x)/B(Suc n)) > 2 * a}
+ PS.prob {x ∈ space M. ∀k < K. abs(g x + birkhoff_sum f k x - g((T^^k) x)) ≤ D}
+ PS.prob {x ∈ space M. (∑i∈{1..<K}. h ((T^^i) x)) ≥ 1}"
using A D K(2) by auto
also have "... ≤ 2 + PS.prob X"
unfolding X_def by (rule PS.sum_measure_le_measure_inter3, auto)
finally have "PS.prob X ≥ alpha" by auto
have I: "(λy. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k))) ((T^^k) x) ≥ C0" if "x ∈ X" "k ∈ {1..<K}" for x k
proof -
have "2 * a * B(Suc n) ≤ abs(g x + birkhoff_sum f (Suc n) x)"
using ‹x ∈ X› Bpos[of "Suc n"] unfolding X_def by (auto simp add: divide_simps)
also have "... = abs(g((T^^k) x) + birkhoff_sum f (Suc n -k) ((T^^k) x) + (g x + birkhoff_sum f k x - g((T^^k) x)))"
using ‹n > N+K› ‹k ∈ {1..<K}› birkhoff_sum_cocycle[of f k "Suc n - k" x] by auto
also have "... ≤ abs(g((T^^k) x) + birkhoff_sum f (Suc n -k) ((T^^k) x)) + abs(g x + birkhoff_sum f k x - g((T^^k) x))"
by auto
also have "... ≤ abs(g((T^^k) x) + birkhoff_sum f (Suc n -k) ((T^^k) x)) + D"
using ‹x ∈ X› ‹k ∈ {1..<K}› unfolding X_def by auto
also have "... ≤ abs(g((T^^k) x) + birkhoff_sum f (Suc n -k) ((T^^k) x)) + a * B (Suc n)"
using ‹D ≤ a * B (Suc n)› by simp
finally have *: "a * B (Suc n) ≤ abs(g((T^^k) x) + birkhoff_sum f (Suc n -k) ((T^^k) x))"
by simp
have "(C0/a) * B (Suc n - k) ≤ C * B (Suc n - k)"
apply (rule mult_right_mono) unfolding C_def using less_imp_le[OF Bpos] by auto
also have "... ≤ C * Max {B i |i. i ≤ n}"
apply (rule mult_left_mono, rule Max_ge) using ‹k ∈ {1..<K}› ‹C ≥ 1› by auto
also have "... ≤ B (Suc n)"
by fact
finally have "C0 * B (Suc n - k) ≤ a * B (Suc n)"
using ‹a>0› by (simp add: divide_simps algebra_simps)
then have "C0 * B (Suc n - k) ≤ abs(g((T^^k) x) + birkhoff_sum f (Suc n -k) ((T^^k) x))"
using * by auto
then show ?thesis
using Bpos[of "Suc n - k"] by (simp add: divide_simps)
qed
have J: "1 ≤ (∑k∈{1..<K}. (λy. h y * indicator {y ∈ space M. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k)) ≥ C0} y) ((T^^k) x))"
if "x ∈ X" for x
proof -
have "x ∈ space M"
using ‹x ∈ X› unfolding X_def by auto
have "1 ≤ (∑k ∈ {1..<K}. h ((T^^k) x))"
using ‹x ∈ X› unfolding X_def by auto
also have "... = (∑k ∈ {1..<K}. h ((T^^k) x) * indicator {y ∈ space M. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k)) ≥ C0} ((T^^k) x))"
apply (rule sum.cong)
unfolding indicator_def using I[OF ‹x ∈ X›] T_spaceM_stable(2)[OF ‹x ∈ space M›] by auto
finally show ?thesis by simp
qed
have "ennreal alpha ≤ emeasure P X"
using ‹PS.prob X ≥ alpha› by (simp add: PS.emeasure_eq_measure)
also have "... = (∫⇧+x. indicator X x ∂P)"
by auto
also have "... ≤ (∫⇧+x. (∑k∈{1..<K}. (λy. h y
* indicator {y ∈ space M. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k)) ≥ C0} y) ((T^^k) x)) ∂P)"
apply (rule nn_integral_mono) using J unfolding indicator_def by fastforce
also have "... = (∑k∈{1..<K}. (∫⇧+x. (λy. h y
* indicator {y ∈ space M. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k)) ≥ C0} y) ((T^^k) x) ∂P))"
by (rule nn_integral_sum, auto)
also have "... = (∑k∈{1..<K}. (∫⇧+x. (λy. h y
* indicator {y ∈ space M. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k)) ≥ C0} y) ((T^^k) x) * h x ∂M))"
unfolding ‹P = density M h› by (auto intro!: sum.cong nn_integral_densityR[symmetric])
also have "... = (∑k∈{1..<K}. (∫⇧+x. h x
* indicator {y ∈ space M. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k)) ≥ C0} x * ((nn_transfer_operator^^k) h) x ∂M))"
by (auto intro!: sum.cong nn_transfer_operator_intTn_g[symmetric])
also have "... = (∑k∈{1..<K}. (∫⇧+x.
((nn_transfer_operator^^k) h) x * indicator {y ∈ space M. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k)) ≥ C0} x ∂P))"
unfolding ‹P = density M h› by (subst nn_integral_density, auto intro!: sum.cong simp add: algebra_simps)
also have "... ≤ (∑k∈{1..<K}. ennreal delta)"
by (rule sum_mono, rule small_setint, auto simp add: P_le_epsilon)
also have "... = ennreal (∑k∈{1..<K}. delta)"
using less_imp_le[OF ‹delta > 0›] by (rule sum_ennreal)
finally have "alpha ≤ (∑k∈{1..<K}. delta)"
apply (subst ennreal_le_iff[symmetric]) using ‹delta > 0› by auto
also have "... ≤ K * delta"
using ‹delta > 0› by auto
finally show False
unfolding delta_def using ‹K ≥ 1› ‹alpha > 0› by (auto simp add: divide_simps algebra_simps)
qed
text ‹If $n$ is not large, we get the same bound in a trivial way, as there are only finitely
many cases to consider and we have adjusted the constant $C$ so that it works for all of them.›
have leK: "B (Suc n) ≤ C * Max {B i |i. i ≤ n}" if "n ≤ N+K" for n
proof -
have "B (Suc n)/B 0 ≤ Max {B k/B 0 |k. k ≤ N+K+1}"
apply (rule Max_ge, simp) using ‹n ≤ N+K› by auto
also have "... ≤ C" unfolding C_def C1_def by auto
finally have "B (Suc n) ≤ C * B 0"
using Bpos[of 0] by (simp add: divide_simps)
also have "... ≤ C * Max {B i |i. i ≤ n}"
apply (rule mult_left_mono) apply (rule Max_ge) using ‹C ≥ 1› by auto
finally show ?thesis by simp
qed
have "B (Suc n) ≤ C * Max {B i |i. i ≤ n}" for n
using geK[of n] leK[of n] by force
then show ?thesis
using ‹C ≥ 1› by auto
qed
text ‹Then, we prove the lemma without further assumptions, reducing to the previous case by
replacing $M$ with $M+P$. We do this at the level of densities since the addition of measures
is not defined in the library (and it would be problematic as measures carry their sigma-algebra,
so what should one do when the sigma-algebras do not coincide?)›
lemma upper_bound_C:
"∃C≥1. ∀n. B (Suc n) ≤ C * Max {B i|i. i ≤ n}"
proof -
text ‹We introduce the density of $P$, and show that it is almost everywhere finite.›
define h where "h = RN_deriv M P"
have [measurable]: "h ∈ borel_measurable M"
unfolding h_def by auto
have P [simp]: "P = density M h"
unfolding h_def apply (rule density_RN_deriv[symmetric]) using PabsM by auto
have "space P = space M"
by auto
have *: "AE x in M. h x ≠ ∞"
unfolding h_def apply (rule RN_deriv_finite)
using PS.sigma_finite_measure_axioms PabsM by auto
have **: "null_sets (density M (λx. 1 + h x)) = null_sets M"
by (rule null_sets_density, auto)
text ‹We introduce the new system with invariant measure $M+P$, given by the density $1+h$.›
interpret A: conservative "density M (λx. 1 + h x)" T
apply (rule conservative_density) using * by auto
interpret B: conservative_limit T "density M (λx. 1 + h x)" P Z f g B
apply standard
using conv Bpos non_trivial absolutely_continuousI_density[OF ‹h ∈ borel_measurable M›]
unfolding absolutely_continuous_def ** by auto
text ‹We obtain the result by applying the result above to the new dynamical system.
We have to check the additional assumption that the density of $P$ with respect to the new measure
$M + P$ is bounded by $1$. Since this density if $h/(1+h)$, this is trivial modulo a computation
in ennreal that is not automated (yet?).›
have z: "1 = ennreal 1" by auto
have Trivial: "a = (1+a) * (a/(1+a))" if "a ≠ ⊤" for a::ennreal
apply (cases a) apply auto unfolding z ennreal_plus_if apply (subst divide_ennreal) apply simp apply simp
apply (subst ennreal_mult'[symmetric]) using that by auto
have Trivial2: "a / (1+a) ≤ 1" for a::ennreal
apply (cases a) apply auto unfolding z ennreal_plus_if apply (subst divide_ennreal) by auto
show ?thesis
apply (rule B.upper_bound_C_aux[of "λx. h x/(1 + h x)"])
using * Trivial Trivial2 by (auto simp add: density_density_eq density_unique_iff)
qed
text ‹The second main upper bound is the following. Again, it proves that
$B_{n+1} \leq L \max_{i \leq n} B_i$, for some constant $L$, but with two differences. First,
$L$ only depends on the distribution of $Z$ (which is stronger). Second, this estimate is only
proved along a density $1$ sequence of times (which is weaker). The first point implies that
this lemma will also apply to $T^j$, with the same $L$, which amounts to replacing $L$ by $L^{1/j}$,
making it in practice arbitrarily close to $1$. The second point is problematic at first sight, but
for the exceptional times we will use the bound of the previous lemma so this will not really
create problems.
For the proof, we split the sum $S_{n+1} f$ as $S_n f + f \circ T^n$. If $B_{n+1}$ is much larger
than $B_n$, we deduce that $S_n f$ is much smaller than $S_{n+1}f$ with large probability, which
means that $f \circ T^n$ is larger than anything that has been seen before. Since preimages of
distinct events have a measure that tends to $0$ along a density $1$ subsequence, this can only
happen along a density $0$ subsequence.›
lemma upper_bound_L:
fixes a::real and L::real and alpha::real
assumes "a > 0" "alpha > 0" "L > 3"
"PZ.prob {x. abs (x) > 2 * a} > 3 * alpha"
"PZ.prob {x. abs (x) ≥ (L-1) * a} < alpha"
shows "∃A. lower_asymptotic_density A = 1 ∧ (∀n∈A. B (Suc n) ≤ L * Max {B i|i. i ≤ n})"
proof -
define m where "m = (λn. Max {B i|i. i ≤ n})"
define K where "K = (λn::nat. {x ∈ space M. abs(f x) ∈ {a * L * m n <..< a * L * m (Suc n)}})"
have [measurable]: "K n ∈ sets M" for n
unfolding K_def by auto
have *: "m n ≤ m p" if "n ≤ p" for n p
unfolding m_def K_def using that by (auto intro!: Max_mono)
have "K n ∩ K p = {}" if "n < p" for n p
proof (auto simp add: K_def)
fix x assume "¦f x¦ < a * L * m (Suc n)" "a * L * m p < ¦f x¦"
moreover have "a * L * m (Suc n) ≤ a * L * m p"
using *[of "Suc n" p] that ‹a > 0› ‹L > 3› by auto
ultimately show False by auto
qed
then have "disjoint_family K"
unfolding disjoint_family_on_def using nat_neq_iff by auto
have "∃A0. lower_asymptotic_density A0 = 1 ∧
(λn. measure P (space M ∩ (T^^n)-`(K n)) * indicator A0 n) ⇢ 0"
apply (rule disjoint_sets_measure_density_one_tendsto_zero) apply fact+
using PabsM by auto
then obtain A0 where A0: "lower_asymptotic_density A0 = 1" "(λn. measure P (space M ∩ (T^^n)-`(K n)) * indicator A0 n) ⇢ 0"
by blast
obtain N0 where N0: "⋀n. n ≥ N0 ⟹ measure P (space M ∩ (T^^n)-`(K n)) * indicator A0 n < alpha"
using order_tendstoD(2)[OF A0(2) ‹alpha > 0›] unfolding eventually_sequentially by blast
have A: "eventually (λn. measure (distr P borel (λx. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) > 2 * a} > 3 * alpha) sequentially"
apply (rule open_set_weak_conv_lsc[of _ Z])
by (auto simp add: PZ.real_distribution_axioms conv assms)
have B: "eventually (λn. measure (distr P borel (λx. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) ≥ (L- 1) * a} < alpha) sequentially"
apply (rule closed_set_weak_conv_usc[of _ Z])
by (auto simp add: PZ.real_distribution_axioms conv assms)
obtain N where N: "⋀n. n ≥ N ⟹ measure (distr P borel (λx. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) > 2 * a} > 3 * alpha"
"⋀n. n ≥ N ⟹ measure (distr P borel (λx. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) ≥ (L-1) * a} < alpha"
using eventually_conj[OF A B] unfolding eventually_sequentially by blast
have I: "PS.prob {x ∈ space M. abs((g x + birkhoff_sum f n x) / B n) < (L-1) * a} > 1 - alpha" if "n ≥ N" for n
proof -
let ?h = "(λx. (g x + birkhoff_sum f n x) / B n)"
have "measure (distr P borel ?h) {x. abs (x) ≥ (L-1) * a}
= measure P (?h-` {x. abs (x) ≥ (L-1) * a} ∩ space P)"
by (rule measure_distr, auto)
also have "... = measure P {x ∈ space M. abs(?h x) ≥ (L-1) * a}"
by (rule HOL.cong[of "measure P"], auto)
finally have A: "PS.prob {x ∈ space M. abs(?h x) ≥ (L-1) * a} < alpha"
using N(2)[OF that] by auto
have *: "{x ∈ space M. abs(?h x) < (L-1) * a} = space M - {x ∈ space M. abs(?h x) ≥ (L-1) * a}"
by auto
show ?thesis
unfolding * using A PS.prob_compl by auto
qed
have Main: "PS.prob (space M ∩ (T^^n)-`(K n)) > alpha" if "n ≥ N" "B (Suc n) > L * m n" for n
proof -
have "Suc n ≥ N" using that by auto
let ?h = "(λx. (g x + birkhoff_sum f (Suc n) x) / B (Suc n))"
have "measure (distr P borel ?h) {x. abs (x) > 2 * a}
= measure P (?h-` {x. abs (x) > 2 * a} ∩ space P)"
by (rule measure_distr, auto)
also have "... = measure P {x ∈ space M. abs(?h x) > 2 * a}"
by (rule HOL.cong[of "measure P"], auto)
finally have A: "PS.prob {x ∈ space M. abs(?h x) > 2 * a} > 3 * alpha"
using N(1)[OF ‹Suc n ≥ N›] by auto
define X where "X = {x ∈ space M. abs((g x + birkhoff_sum f n x) / B n) < (L-1) * a}
∩ {x ∈ space M. abs((g x + birkhoff_sum f (Suc n) x) / B (Suc n)) < (L-1) * a}
∩ {x ∈ space M. abs((g x + birkhoff_sum f (Suc n) x) / B (Suc n)) > 2 * a}"
have "(1 - alpha) + (1 - alpha) + 3 * alpha <
PS.prob {x ∈ space M. abs((g x + birkhoff_sum f n x) / B n) < (L-1) * a}
+ PS.prob {x ∈ space M. abs((g x + birkhoff_sum f (Suc n) x) / B (Suc n)) < (L-1) * a}
+ PS.prob {x ∈ space M. abs((g x + birkhoff_sum f (Suc n) x) / B (Suc n)) > 2 * a}"
using A I[OF ‹n ≥ N›] I[OF ‹Suc n ≥ N›] by auto
also have "... ≤ 2 + PS.prob X"
unfolding X_def by (rule PS.sum_measure_le_measure_inter3, auto)
finally have "PS.prob X > alpha" by auto
have "X ⊆ space M ∩ (T^^n)-`(K n)"
proof
have *: "B i ≤ m n" if "i ≤ n" for i
unfolding m_def by (rule Max_ge, auto simp add: that)
have **: "B i ≤ B (Suc n)" if "i ≤ Suc n" for i
proof (cases "i ≤ n")
case True
have "m n ≤ B (Suc n) / L"
using ‹L * m n < B (Suc n)› ‹L > 3› by (simp add: divide_simps algebra_simps)
also have "... ≤ B (Suc n)"
using Bpos[of "Suc n"] ‹L > 3› by (simp add: divide_simps algebra_simps)
finally show ?thesis using *[OF True] by simp
next
case False
then show ?thesis
using ‹i ≤ Suc n› le_SucE by blast
qed
have "m (Suc n) = B (Suc n)"
unfolding m_def by (rule Max_eqI, auto simp add: **)
fix x assume "x ∈ X"
then have "abs (g x + birkhoff_sum f n x) < (L-1) * a * B n"
unfolding X_def using Bpos[of n] by (auto simp add: algebra_simps divide_simps)
also have "... ≤ L * a * m n"
using *[of n] ‹L > 3› ‹a > 0› Bpos[of n] by (auto intro!: mult_mono)
also have "... ≤ a * B (Suc n)"
using ‹B (Suc n) > L * m n› less_imp_le ‹a > 0› by auto
finally have A: "abs (g x + birkhoff_sum f n x) < a * B (Suc n)"
by simp
have B: "abs(g x + birkhoff_sum f (Suc n) x) < (L-1) * a * B (Suc n)"
using ‹x ∈ X› unfolding X_def using Bpos[of "Suc n"] by (auto simp add: algebra_simps divide_simps)
have *: "f((T^^n) x) = (g x + birkhoff_sum f (Suc n) x) - (g x + birkhoff_sum f n x)"
apply (auto simp add: algebra_simps)
by (metis add.commute birkhoff_sum_1(2) birkhoff_sum_cocycle plus_1_eq_Suc)
have "abs(f((T^^n) x)) ≤ abs (g x + birkhoff_sum f (Suc n) x) + abs(g x + birkhoff_sum f n x)"
unfolding * by simp
also have "... < (L-1) * a * B (Suc n) + a * B (Suc n)"
using A B by auto
also have "... = L * a * m (Suc n)"
using ‹m (Suc n) = B (Suc n)› by (simp add: algebra_simps)
finally have Z1: "abs(f((T^^n) x)) < L * a * m (Suc n)"
by simp
have "2 * a * B (Suc n) < abs (g x + birkhoff_sum f (Suc n) x)"
using ‹x ∈ X› unfolding X_def using Bpos[of "Suc n"] by (auto simp add: algebra_simps divide_simps)
also have "... = abs(f((T^^n) x) + (g x + birkhoff_sum f n x))"
unfolding * by auto
also have "... ≤ abs(f((T^^n) x)) + abs (g x + birkhoff_sum f n x)"
by auto
also have "... < abs(f((T^^n) x)) + a * B (Suc n)"
using A by auto
finally have "abs(f((T^^n) x)) > a * B (Suc n)"
by simp
then have Z2: "abs(f((T^^n) x)) > a * L * m n"
using mult_strict_left_mono[OF ‹B (Suc n) > L * m n› ‹a > 0›] by auto
show "x ∈ space M ∩ (T ^^ n) -` K n"
using Z1 Z2 ‹x ∈ X› unfolding K_def X_def by (auto simp add: algebra_simps)
qed
have "PS.prob X ≤ PS.prob (space M ∩ (T^^n)-`(K n))"
by (rule PS.finite_measure_mono, fact, auto)
then show "alpha < PS.prob (space M ∩ (T ^^ n) -` K n)"
using ‹alpha < PS.prob X› by simp
qed
define A where "A = A0 ∩ {N + N0..}"
have "lower_asymptotic_density A = 1"
unfolding A_def by (rule lower_asymptotic_density_one_intersection, fact, simp)
moreover have "B (Suc n) ≤ L * m n" if "n ∈ A" for n
proof (rule ccontr)
assume "¬(B (Suc n) ≤ L * m n)"
then have "L * m n < B (Suc n)" "n ≥ N" "n ≥ N0"
using ‹n ∈ A› unfolding A_def by auto
then have "PS.prob (space M ∩ (T^^n)-`(K n)) > alpha"
using Main by auto
moreover have "PS.prob (space M ∩ (T^^n)-`(K n)) * indicator A0 n < alpha"
using N0[OF ‹n ≥ N0›] by simp
moreover have "indicator A0 n = (1::real)"
using ‹n ∈ A› unfolding A_def indicator_def by auto
ultimately show False
by simp
qed
ultimately show ?thesis
unfolding m_def by blast
qed
text ‹Now, we combine the two previous statements to prove the main theorem.›
theorem subexponential_growth:
"(λn. max 0 (ln (B n) /n)) ⇢ 0"
proof -
obtain a0 where a0: "a0 > 0" "PZ.prob {x. abs (x) > a0} > 0"
using PZ.not_Dirac_0_imp_positive_mass_away_0[OF non_trivial] by blast
define a where "a = a0/2"
have "a > 0" using ‹a0 > 0› unfolding a_def by auto
define alpha where "alpha = PZ.prob {x. abs (x) > a0}/4"
have "alpha > 0" unfolding alpha_def using a0 by auto
have "PZ.prob {x. abs (x) > 2 * a} > 3 * alpha"
using a0 unfolding a_def alpha_def by auto
obtain C0 where C0: "PZ.prob {x. abs(x) ≥ C0} < alpha" "C0 ≥ 3 * a"
using PZ.random_variable_small_tails[OF ‹alpha > 0›, of "λx. x"] by auto
define L where "L = C0/a + 1"
have "PZ.prob {x. abs(x) ≥ (L-1) * a} < alpha"
unfolding L_def using C0 ‹a>0› by auto
have "L > 3"
unfolding L_def using C0 ‹a > 0› by (auto simp add: divide_simps)
obtain C where C: "⋀n. B (Suc n) ≤ C * Max {B i|i. i ≤ n}" "C ≥ 1"
using upper_bound_C by blast
have C2: "B n ≤ C * Max {B i|i. i < n}" if "n > 0" for n
proof -
obtain m where m: "n = Suc m"
using ‹0 < n› gr0_implies_Suc by auto
have *: "i ≤ m ⟷ i < Suc m" for i by auto
show ?thesis using C(1)[of m] unfolding m * by auto
qed
have Mainj: "eventually (λn. ln (B n) / n ≤ (1+ln L)/j) sequentially" if "j > 0" for j::nat
proof -
have *: "∃A. lower_asymptotic_density A = 1 ∧ (∀n∈A. B (j * Suc n + k) ≤ L * Max {B (j * i + k) |i. i ≤ n})" for k
proof -
interpret Tj0: conservative M "(T^^j)" using conservative_power[of j] by auto
have *: "g x + birkhoff_sum f k x + Tj0.birkhoff_sum (λx. birkhoff_sum f j ((T ^^ k) x)) n x = g x + birkhoff_sum f (j * n + k) x" for x n
proof -
have "birkhoff_sum f (j * n + k) x = (∑i ∈ {..<k} ∪ {k..<j * n + k}. f ((T ^^ i) x))"
unfolding birkhoff_sum_def by (rule sum.cong, auto)
also have "... = (∑i ∈ {..<k}. f ((T ^^ i) x)) + (∑i ∈ {k..<j * n + k}. f ((T ^^ i) x))"
by (auto intro!: sum.union_disjoint)
also have "... = birkhoff_sum f k x + (∑s<j. ∑i<n. f ((T ^^ (i * j + s)) ((T^^k) x)))"
apply (subst sum_arith_progression)
unfolding birkhoff_sum_def Tj0.birkhoff_sum_def funpow_mult funpow_add'[symmetric]
by (auto simp add: algebra_simps intro!: sum.reindex_bij_betw[symmetric] bij_betw_byWitness[of _ "λa. a-k"])
also have "... = birkhoff_sum f k x + Tj0.birkhoff_sum (λx. birkhoff_sum f j ((T ^^ k) x)) n x"
unfolding birkhoff_sum_def Tj0.birkhoff_sum_def funpow_mult funpow_add'[symmetric]
by (auto simp add: algebra_simps intro!: sum.swap)
finally show ?thesis by simp
qed
interpret Tj: conservative_limit "T^^j" M P Z "λx. birkhoff_sum f j ((T^^k) x)" "λx. g x + birkhoff_sum f k x" "λn. B (j * n + k)"
apply standard
using PabsM Bpos non_trivial conv ‹j>0› unfolding * by (auto intro!: weak_conv_m_subseq strict_monoI)
show ?thesis
apply (rule Tj.upper_bound_L[OF ‹a > 0› ‹alpha > 0›]) by fact+
qed
have "∃A. ∀k. lower_asymptotic_density (A k) = 1 ∧ (∀n∈A k. B (j * Suc n + k) ≤ L * Max {B (j * i + k) |i. i ≤ n})"
apply (rule choice) using * by auto
then obtain A where A: "⋀k. lower_asymptotic_density (A k) = 1" "⋀k n. n ∈ A k ⟹ B (j * Suc n + k) ≤ L * Max {B (j * i + k) |i. i ≤ n}"
by blast
define Aj where "Aj = (⋂k<j. A k)"
have "lower_asymptotic_density Aj = 1"
unfolding Aj_def using A(1) by (simp add: lower_asymptotic_density_one_finite_Intersection)
define Bj where "Bj = UNIV - Aj"
have "upper_asymptotic_density Bj = 0"
using ‹lower_asymptotic_density Aj = 1›
unfolding Bj_def lower_upper_asymptotic_density_complement by simp
define M where "M = (λn. Max {B p |p. p < (n+1) * j})"
have "B 0 ≤ M n" for n
unfolding M_def apply (rule Max_ge, auto, rule exI[of _ 0]) using ‹j > 0› by auto
then have Mpos: "M n > 0" for n
by (metis Bpos not_le not_less_iff_gr_or_eq order.strict_trans)
have M_L: "M (Suc n) ≤ L * M n" if "n ∈ Aj" for n
proof -
have *: "B s ≤ L * M n" if "s < (n+2) * j" for s
proof (cases "s < (n+1) * j")
case True
have "B s ≤ M n"
unfolding M_def apply (rule Max_ge) using True by auto
also have "... ≤ L * M n" using ‹L > 3› ‹M n > 0› by auto
finally show ?thesis by simp
next
case False
then obtain k where "k < j" "s = (n+1) * j + k" using ‹s < (n+2) * j› le_Suc_ex by force
then have "B s = B (j * Suc n + k)" by (auto simp add: algebra_simps)
also have "... ≤ L * Max {B (j * i + k) |i. i ≤ n}"
using A(2)[of n k] ‹n ∈ Aj› unfolding Aj_def using ‹k < j› by auto
also have "... ≤ L * Max {B a|a. a < (n+1) * j}"
apply (rule mult_left_mono, rule Max_mono) using ‹L>3› proof (auto)
fix i assume "i ≤ n" show "∃a. B (j * i + k) = B a ∧ a < j + n * j"
apply (rule exI[of _ "j * i + k"]) using ‹k < j› ‹i ≤ n›
by (auto simp add: add_mono_thms_linordered_field(3) algebra_simps)
qed
finally show ?thesis unfolding M_def by auto
qed
show ?thesis
unfolding M_def apply (rule Max.boundedI)
using * unfolding M_def using ‹j > 0› by auto
qed
have M_C: "M (Suc n) ≤ C^j * M n" for n
proof -
have I: "Max {B s|s. s < (n+1) * j + k} ≤ C^k * M n" for k
proof (induction k)
case 0
show ?case
apply (rule Max.boundedI) unfolding M_def using ‹j > 0› by auto
next
case (Suc k)
have *: "B s ≤ C * C ^ k * M n" if "s < Suc (j + n * j + k)" for s
proof (cases "s < j + n * j + k")
case True
then have "B s ≤ C^k * M n" using iffD1[OF Max_le_iff, OF _ _ Suc.IH] by auto
also have "... ≤ C * C^k * M n" using ‹C ≥ 1› ‹M n > 0› by auto
finally show ?thesis by simp
next
case False
then have "s = j + n * j + k" using that by auto
then have "B s ≤ C * Max {B s|s. s < (n+1) * j + k}" using C2[of s] using ‹j > 0› by auto
also have "... ≤ C * C^k * M n" using Suc.IH ‹C ≥ 1› by auto
finally show ?thesis by simp
qed
show ?case
apply (rule Max.boundedI) using ‹j > 0› * by auto
qed
show ?thesis using I[of j] unfolding M_def by (auto simp add: algebra_simps)
qed
have I: "ln (M n) ≤ ln (M 0) + n * ln L + card (Bj ∩ {..<n}) * ln (C^j)" for n
proof (induction n)
case 0
show ?case by auto
next
case (Suc n)
show ?case
proof (cases "n ∈ Bj")
case True
then have *: "Bj ∩ {..<Suc n} = Bj ∩ {..<n} ∪ {n}" by auto
have **: "card (Bj ∩ {..<Suc n}) = card (Bj ∩ {..<n}) + card {n}"
unfolding * by (rule card_Un_disjoint, auto)
have "ln (M (Suc n)) ≤ ln (C^j * M n)"
using M_C ‹⋀n. 0 < M n› less_le_trans ln_le_cancel_iff by blast
also have "... = ln (M n) + ln (C^j)"
using ‹C ≥ 1› ‹0 < M n› ln_mult by auto
also have "... ≤ ln (M 0) + n * ln L + card (Bj ∩ {..<n}) * ln (C^j) + ln (C^j)"
using Suc.IH by auto
also have "... = ln (M 0) + n * ln L + card (Bj ∩ {..<Suc n}) * ln (C^j)"
using ** by (auto simp add: algebra_simps)
also have "... ≤ ln (M 0) + (Suc n) * ln L + card (Bj ∩ {..<Suc n}) * ln (C^j)"
using ‹L > 3› by auto
finally show ?thesis by auto
next
case False
have "M (Suc n) ≤ L * M n"
apply (rule M_L) using False unfolding Bj_def by auto
then have "ln (M (Suc n)) ≤ ln (L * M n)"
using ‹⋀n. 0 < M n› less_le_trans ln_le_cancel_iff by blast
also have "... = ln (M n) + ln L"
using ‹L > 3› ‹0 < M n› ln_mult by auto
also have "... ≤ ln (M 0) + Suc n * ln L + card (Bj ∩ {..<n}) * ln (C^j)"
using Suc.IH by (auto simp add: algebra_simps)
also have "... ≤ ln (M 0) + Suc n * ln L + card (Bj ∩ {..<Suc n}) * ln (C^j)"
using ‹C ≥ 1› by (auto intro!: mult_right_mono card_mono)
finally show ?thesis by auto
qed
qed
have "ln (M n)/n ≤ ln (M 0)* (1/n) + ln L + (card (Bj ∩ {..<n})/n) * ln (C^j)" if "n≥1" for n
using that apply (auto simp add: algebra_simps divide_simps)
by (metis (no_types, hide_lams) I add.assoc mult.commute mult_left_mono of_nat_0_le_iff semiring_normalization_rules(34))
then have A: "eventually (λn. ln (M n)/n ≤ ln (M 0)* (1/n) + ln L + (card (Bj ∩ {..<n})/n) * ln (C^j)) sequentially"
unfolding eventually_sequentially by blast
have *: "(λn. ln (M 0)*(1/n) + ln L + (card (Bj ∩ {..<n})/n) * ln (C^j)) ⇢ ln (M 0) * 0 + ln L + 0 *ln (C^j)"
by (intro tendsto_intros upper_asymptotic_density_zero_lim, fact)
have B: "eventually (λn. ln (M 0)*(1/n) + ln L + (card (Bj ∩ {..<n})/n) * ln (C^j) < 1 + ln L) sequentially"
by (rule order_tendstoD[OF *], auto)
have "eventually (λn. ln (M n)/n < 1 + ln L) sequentially"
using eventually_conj[OF A B] by (simp add: eventually_mono)
then obtain N where N: "⋀n. n ≥ N ⟹ ln (M n)/n < 1 + ln L"
unfolding eventually_sequentially by blast
have "ln (B p) / p ≤ (1+ln L) / j" if "p ≥ (N+1) * j" for p
proof -
define n where "n = p div j"
have "n ≥ N+1" unfolding n_def using that
by (metis ‹0 < j› div_le_mono div_mult_self_is_m)
then have "n ≥ N" "n ≥ 1" by auto
have *: "p < (n+1) * j" "n * j ≤ p"
unfolding n_def using ‹j > 0› dividend_less_div_times by auto
have "B p ≤ M n"
unfolding M_def apply (rule Max_ge) using * by auto
then have "ln (B p) ≤ ln (M n)"
using Bpos Mpos ln_le_cancel_iff by blast
also have "... ≤ n * (1+ln L)"
using N[OF ‹n ≥ N›] ‹n ≥ 1› by (auto simp add: divide_simps algebra_simps)
also have "... ≤ (p/j) * (1+ln L)"
apply (rule mult_right_mono) using *(2) ‹j > 0› ‹L > 3›
apply (auto simp add: divide_simps algebra_simps)
using of_nat_mono by fastforce
finally show ?thesis
using ‹j > 0› that by (simp add: algebra_simps divide_simps)
qed
then show "eventually (λp. ln (B p) / p ≤ (1+ln L)/j) sequentially"
unfolding eventually_sequentially by auto
qed
show "(λn. max 0 (ln (B n) / real n)) ⇢ 0"
proof (rule order_tendstoI)
fix e::real assume "e > 0"
have *: "(λj. (1+ln L) * (1/j)) ⇢ (1+ln L) * 0"
by (intro tendsto_intros)
have "eventually (λj. (1+ln L) * (1/j) < e) sequentially"
apply (rule order_tendstoD[OF *]) using ‹e > 0› by auto
then obtain j::nat where j: "j > 0" "(1+ln L) * (1/j) < e"
unfolding eventually_sequentially using add.right_neutral le_eq_less_or_eq by fastforce
show "eventually (λn. max 0 (ln (B n) / real n) < e) sequentially"
using Mainj[OF ‹j > 0›] j(2) ‹e > 0› by (simp add: eventually_mono)
qed (simp add: max.strict_coboundedI1)
qed
end
subsection ‹Normalizing sequences grow at most polynomially in probability preserving systems›
text ‹In probability preserving systems, normalizing sequences grow at most polynomially.
The proof, also given in~\cite{gouezel_normalizing_sequences}, is considerably easier than
the conservative case. We prove that $B_{n+1} \leq C B_n$ (more precisely, this only holds if
$B_{n+1}$ is large enough), by arguing that $S_{n+1} f = S_n f + f \circ T^n$, where $f\circ T^n$
is negligible if $B_{n+1}$ is large thanks to the measure preservation. We also prove that
$B_{2n} \leq E B_n$, by writing $S_{2n} f = S_n f + S_n f \circ T^n$ and arguing that the two terms
on the right have the same distribution. Finally, combining these two estimates, the polynomial
growth follows readily.›
(* A probability-measure-preserving system (pmpt) together with a measurable
   observable f and positive normalizing constants B n, under the assumption that
   the Birkhoff sums of f, rescaled by B n, converge in distribution to a limit
   law Z which is not concentrated at 0.
   NOTE(review): the assumption ‹conv› refers to a measure P that is not fixed by
   this locale — presumably a constant from an enclosing context (or intended to
   coincide with M); confirm against the rest of the theory. *)
locale pmpt_limit =
pmpt M + PZ: real_distribution Z
for M::"'a measure" and Z::"real measure" +
fixes f::"'a ⇒ real" and B::"nat ⇒ real"
assumes Bpos: "⋀n. B n > 0" (* normalizations are positive *)
and M [measurable]: "f ∈ borel_measurable M" (* the observable is measurable *)
and non_trivial: "PZ.prob {0} < 1" (* the limit law is not the Dirac mass at 0 *)
and conv: "weak_conv_m (λn. distr P borel (λx. (birkhoff_sum f n x) / B n)) Z"
begin
text ‹First, we prove that $B_{n+1} \leq C B_n$ if $B_{n+1}$ is large enough.›
(* There are constants C ≥ 1 and D such that, for every n, either B (Suc n) ≤ D
   (B (Suc n) is not large), or B (Suc n) ≤ C * B n.  This is the one-step
   comparison used later to establish polynomial growth of B. *)
lemma upper_bound_CD:
"∃C D. (∀n. B (Suc n) ≤ D ∨ B (Suc n) ≤ C * B n) ∧ C ≥ 1"
proof -
(* Pick a > 0 such that the limit law Z gives positive mass to {|x| > a};
   alpha is a quarter of that mass, so the mass exceeds 3 * alpha. *)
obtain a where a: "a > 0" "PZ.prob {x. abs (x) > a} > 0"
using PZ.not_Dirac_0_imp_positive_mass_away_0[OF non_trivial] by blast
define alpha where "alpha = PZ.prob {x. abs (x) > a}/4"
have "alpha > 0" unfolding alpha_def using a by auto
have A: "PZ.prob {x. abs (x) > a} > 3 * alpha"
using a unfolding alpha_def by auto
(* Tail cutoff C0 for the limit law: mass of {|x| ≥ C0} is below alpha. *)
obtain C0 where C0: "PZ.prob {x. abs(x) ≥ C0} < alpha" "C0 ≥ a"
using PZ.random_variable_small_tails[OF ‹alpha > 0›, of "λx. x"] by auto
(* Weak convergence transfers both bounds to the laws of S_n f / B n eventually:
   lower semicontinuity of measure on the open set {|x| > a}, upper semicontinuity
   on the closed set {|x| ≥ C0}. *)
have A: "eventually (λn. measure (distr M borel (λx. (birkhoff_sum f n x) / B n)) {x. abs (x) > a} > 3 * alpha) sequentially"
apply (rule open_set_weak_conv_lsc[of _ Z])
by (auto simp add: PZ.real_distribution_axioms conv A)
have B: "eventually (λn. measure (distr M borel (λx. (birkhoff_sum f n x) / B n)) {x. abs (x) ≥ C0} < alpha) sequentially"
apply (rule closed_set_weak_conv_usc[of _ Z])
by (auto simp add: PZ.real_distribution_axioms conv C0)
obtain N where N: "⋀n. n ≥ N ⟹ measure (distr M borel (λx. (birkhoff_sum f n x) / B n)) {x. abs x > a} > 3 * alpha"
"⋀n. n ≥ N ⟹ measure (distr M borel (λx. (birkhoff_sum f n x) / B n)) {x. abs x ≥ C0} < alpha"
using eventually_conj[OF A B] unfolding eventually_sequentially by blast
(* Tail cutoff Cf for the observable f itself. *)
obtain Cf where Cf: "prob {x ∈ space M. abs(f x) ≥ Cf} < alpha" "Cf ≥ 1"
using random_variable_small_tails[OF ‹alpha > 0› M] by auto
(* Main estimate: for n ≥ N, if B (Suc n) is large enough then it is controlled
   by B n up to the factor 2 * C0 / a. *)
have Main: "B (Suc n) ≤ (2*C0/a) * B n" if "n ≥ N" "B (Suc n) ≥ 2 * Cf/a" for n
proof -
have "Suc n ≥ N" using that by auto
(* Translate the distributional lower bound at time Suc n into a bound on M. *)
let ?h = "(λx. (birkhoff_sum f (Suc n) x) / B (Suc n))"
have "measure (distr M borel ?h) {x. abs (x) > a}
= measure M (?h-` {x. abs (x) > a} ∩ space M)"
by (rule measure_distr, auto)
also have "... = prob {x ∈ space M. abs(?h x) > a}"
by (rule HOL.cong[of "measure M"], auto)
finally have A: "prob {x ∈ space M. abs(?h x) > a} > 3 * alpha"
using N(1)[OF ‹Suc n ≥ N›] by auto
(* Same translation at time n, with the C0 cutoff. *)
let ?h = "(λx. (birkhoff_sum f n x) / B n)"
have "measure (distr M borel ?h) {x. abs (x) ≥ C0}
= measure M (?h-` {x. abs (x) ≥ C0} ∩ space M)"
by (rule measure_distr, auto)
also have "... = measure M {x ∈ space M. abs(?h x) ≥ C0}"
by (rule HOL.cong[of "measure M"], auto)
finally have B0: "prob {x ∈ space M. abs(?h x) ≥ C0} < alpha"
using N(2)[OF ‹n ≥ N›] by auto
have *: "{x ∈ space M. abs(?h x) < C0} = space M - {x ∈ space M. abs(?h x) ≥ C0}"
by auto
have B: "prob {x ∈ space M. abs(?h x) < C0} > 1- alpha"
unfolding * using B0 prob_compl by auto
(* By measure preservation of T, the tail bound for f transfers to f ∘ T^n. *)
have "prob {x ∈ space M. abs(f ((T^^n) x)) ≥ Cf} = prob ((T^^n)-`{x ∈ space M. abs(f x) ≥ Cf} ∩ space M)"
by (rule HOL.cong[of "prob"], auto)
also have "... = prob {x ∈ space M. abs(f x) ≥ Cf}"
using T_vrestr_same_measure(2)[of "{x ∈ space M. abs(f x) ≥ Cf}" n]
unfolding vimage_restr_def by auto
finally have C0: "prob {x ∈ space M. abs(f ((T^^n) x)) ≥ Cf} < alpha"
using Cf by simp
have *: "{x ∈ space M. abs(f ((T^^n) x)) < Cf} = space M - {x ∈ space M. abs(f ((T^^n) x)) ≥ Cf}"
by auto
have C: "prob {x ∈ space M. abs(f ((T^^n) x)) < Cf} > 1- alpha"
unfolding * using C0 prob_compl by auto
(* The three events hold simultaneously on a set X of probability > alpha > 0,
   so X is nonempty. *)
define X where "X = {x ∈ space M. abs((birkhoff_sum f n x) / B n) < C0}
∩ {x ∈ space M. abs((birkhoff_sum f (Suc n) x) / B (Suc n)) > a}
∩ {x ∈ space M. abs(f ((T^^n) x)) < Cf}"
have "(1 - alpha) + 3 * alpha + (1 - alpha) <
prob {x ∈ space M. abs((birkhoff_sum f n x) / B n) < C0}
+ prob {x ∈ space M. abs((birkhoff_sum f (Suc n) x) / B (Suc n)) > a}
+ prob {x ∈ space M. abs(f ((T^^n) x)) < Cf}"
using A B C by auto
also have "... ≤ 2 + prob X"
unfolding X_def by (rule sum_measure_le_measure_inter3, auto)
finally have "prob X > alpha" by auto
then have "X ≠ {}" using ‹alpha > 0› by auto
then obtain x where "x ∈ X" by auto
(* At a point of X, the cocycle identity S_{n+1} f = S_n f + f ∘ T^n yields the
   desired comparison between B (Suc n) and B n. *)
have *: "abs(birkhoff_sum f n x) ≤ C0 * B n"
"abs(birkhoff_sum f (Suc n) x) ≥ a * B (Suc n)"
"abs(f((T^^n) x)) ≤ Cf"
using ‹x ∈ X› Bpos[of n] Bpos[of "Suc n"] unfolding X_def by (auto simp add: divide_simps)
have "a * B (Suc n) ≤ abs(birkhoff_sum f (Suc n) x)"
using * by simp
also have "... = abs(birkhoff_sum f n x + f ((T^^n) x))"
by (metis Groups.add_ac(2) One_nat_def birkhoff_sum_1(3) birkhoff_sum_cocycle plus_1_eq_Suc)
also have "... ≤ C0 * B n + Cf"
using * by auto
also have "... ≤ C0 * B n + (a/2) * B (Suc n)"
using ‹B (Suc n) ≥ 2 * Cf/a› ‹a > 0› by (auto simp add: divide_simps algebra_simps)
finally show "B (Suc n) ≤ (2 * C0/a) * B n"
using ‹a > 0› by (auto simp add: divide_simps algebra_simps)
qed
(* The finitely many remaining indices n ≤ N are absorbed into the constant C1. *)
define C1 where "C1 = Max {B(Suc n)/B n |n. n ≤ N}"
have *: "B (Suc n) ≤ max ((2 * C0/a)) C1 * B n" if "B (Suc n) > 2 * Cf/a" for n
proof (cases "n > N")
case True
then show ?thesis
using Main[OF less_imp_le[OF ‹n > N›] less_imp_le[OF that]] Bpos[of n]
by (meson max.cobounded1 order_trans mult_le_cancel_iff1)
next
case False
then have "n ≤ N" by simp
have "B(Suc n)/B n ≤ C1"
unfolding C1_def apply (rule Max_ge) using ‹n ≤ N› by auto
then have "B (Suc n) ≤ C1 * B n"
using Bpos[of n] by (simp add: divide_simps)
then show ?thesis
using Bpos[of n] by (meson max.cobounded2 order_trans mult_le_cancel_iff1)
qed
show ?thesis
apply (rule exI[of _ "max ((2 * C0/a)) C1"], rule exI[of _ "2 * Cf/a"])
using * linorder_not_less ‹C0 ≥ a› ‹a > 0› by (auto intro!: max.coboundedI1)
qed
text ‹Second, we prove that $B_{2n} \leq E B_n$.›
(* For every n, B (2 * n) ≤ E * B n for some constant E.  The key point is the
   decomposition S_{2n} f = S_n f + S_n f ∘ T^n, where both summands have the same
   distribution by measure preservation. *)
lemma upper_bound_E:
"∃E. ∀n. B (2 * n) ≤ E * B n"
proof -
(* Same setup as in upper_bound_CD: mass > 3 * alpha beyond a, tail cutoff C0. *)
obtain a where a: "a > 0" "PZ.prob {x. abs (x) > a} > 0"
using PZ.not_Dirac_0_imp_positive_mass_away_0[OF non_trivial] by blast
define alpha where "alpha = PZ.prob {x. abs (x) > a}/4"
have "alpha > 0" unfolding alpha_def using a by auto
have A: "PZ.prob {x. abs (x) > a} > 3 * alpha"
using a unfolding alpha_def by auto
obtain C0 where C0: "PZ.prob {x. abs(x) ≥ C0} < alpha" "C0 ≥ a"
using PZ.random_variable_small_tails[OF ‹alpha > 0›, of "λx. x"] by auto
(* Transfer both bounds to the laws of S_n f / B n, eventually in n. *)
have A: "eventually (λn. measure (distr M borel (λx. (birkhoff_sum f n x) / B n)) {x. abs (x) > a} > 3 * alpha) sequentially"
apply (rule open_set_weak_conv_lsc[of _ Z])
by (auto simp add: PZ.real_distribution_axioms conv A)
have B: "eventually (λn. measure (distr M borel (λx. (birkhoff_sum f n x) / B n)) {x. abs (x) ≥ C0} < alpha) sequentially"
apply (rule closed_set_weak_conv_usc[of _ Z])
by (auto simp add: PZ.real_distribution_axioms conv C0)
obtain N where N: "⋀n. n ≥ N ⟹ measure (distr M borel (λx. (birkhoff_sum f n x) / B n)) {x. abs x > a} > 3 * alpha"
"⋀n. n ≥ N ⟹ measure (distr M borel (λx. (birkhoff_sum f n x) / B n)) {x. abs x ≥ C0} < alpha"
using eventually_conj[OF A B] unfolding eventually_sequentially by blast
(* Main estimate, for n ≥ N. *)
have Main: "B (2 * n) ≤ (2*C0/a) * B n" if "n ≥ N" for n
proof -
have "2 * n ≥ N" using that by auto
(* Translate the distributional lower bound at time 2 * n into a bound on M. *)
let ?h = "(λx. (birkhoff_sum f (2 * n) x) / B (2 * n))"
have "measure (distr M borel ?h) {x. abs (x) > a}
= measure M (?h-` {x. abs (x) > a} ∩ space M)"
by (rule measure_distr, auto)
also have "... = prob {x ∈ space M. abs(?h x) > a}"
by (rule HOL.cong[of "measure M"], auto)
finally have A: "prob {x ∈ space M. abs((birkhoff_sum f (2 * n) x) / B (2 * n)) > a} > 3 * alpha"
using N(1)[OF ‹2 * n ≥ N›] by auto
(* Same translation at time n, with the C0 cutoff. *)
let ?h = "(λx. (birkhoff_sum f n x) / B n)"
have "measure (distr M borel ?h) {x. abs (x) ≥ C0}
= measure M (?h-` {x. abs (x) ≥ C0} ∩ space M)"
by (rule measure_distr, auto)
also have "... = measure M {x ∈ space M. abs(?h x) ≥ C0}"
by (rule HOL.cong[of "measure M"], auto)
finally have B0: "prob {x ∈ space M. abs(?h x) ≥ C0} < alpha"
using N(2)[OF ‹n ≥ N›] by auto
have *: "{x ∈ space M. abs(?h x) < C0} = space M - {x ∈ space M. abs(?h x) ≥ C0}"
by auto
have B: "prob {x ∈ space M. abs((birkhoff_sum f n x) / B n) < C0} > 1- alpha"
unfolding * using B0 prob_compl by auto
(* Measure preservation: the bound at time n also holds after composing with T^n. *)
have "prob {x ∈ space M. abs(?h ((T^^n) x)) < C0} = prob ((T^^n)-`{x ∈ space M. abs(?h x) < C0} ∩ space M)"
by (rule HOL.cong[of "prob"], auto)
also have "... = prob {x ∈ space M. abs(?h x) < C0}"
using T_vrestr_same_measure(2)[of "{x ∈ space M. abs(?h x) < C0}" n]
unfolding vimage_restr_def by auto
finally have C: "prob {x ∈ space M. abs((birkhoff_sum f n ((T^^n) x)) / B n) < C0} > 1- alpha"
using B by simp
(* The three events intersect in a set X of probability > alpha, hence nonempty. *)
define X where "X = {x ∈ space M. abs((birkhoff_sum f n x) / B n) < C0}
∩ {x ∈ space M. abs((birkhoff_sum f (2* n) x) / B (2* n)) > a}
∩ {x ∈ space M. abs((birkhoff_sum f n ((T^^n) x)) / B n) < C0}"
have "(1 - alpha) + 3 * alpha + (1 - alpha) <
prob {x ∈ space M. abs((birkhoff_sum f n x) / B n) < C0}
+ prob {x ∈ space M. abs((birkhoff_sum f (2* n) x) / B (2* n)) > a}
+ prob {x ∈ space M. abs((birkhoff_sum f n ((T^^n) x)) / B n) < C0}"
using A B C by auto
also have "... ≤ 2 + prob X"
unfolding X_def by (rule sum_measure_le_measure_inter3, auto)
finally have "prob X > alpha" by auto
then have "X ≠ {}" using ‹alpha > 0› by auto
then obtain x where "x ∈ X" by auto
(* Conclude via the cocycle identity S_{2n} f = S_n f + S_n f ∘ T^n at a point of X. *)
have *: "abs(birkhoff_sum f n x) ≤ C0 * B n"
"abs((birkhoff_sum f (2 * n) x)) ≥ a * B (2 * n)"
"abs((birkhoff_sum f n ((T^^n) x))) ≤ C0 * B n"
using ‹x ∈ X› Bpos[of n] Bpos[of "2* n"] unfolding X_def by (auto simp add: divide_simps)
have "a * B (2 * n) ≤ abs(birkhoff_sum f (2 * n) x)"
using * by simp
also have "... = abs(birkhoff_sum f n x + birkhoff_sum f n ((T^^n) x))"
unfolding birkhoff_sum_cocycle[of f n n x, symmetric] by (simp add: mult_2)
also have "... ≤ 2 * C0 * B n"
using * by auto
finally show "B (2 * n) ≤ (2 * C0/a) * B n"
using ‹a > 0› by (auto simp add: divide_simps algebra_simps)
qed
(* The finitely many remaining indices n ≤ N are absorbed into the constant C1. *)
define C1 where "C1 = Max {B(2 * n)/B n |n. n ≤ N}"
have *: "B (2*n) ≤ max ((2 * C0/a)) C1 * B n" for n
proof (cases "n > N")
case True
then show ?thesis
using Main[OF less_imp_le[OF ‹n > N›]] Bpos[of n]
by (meson max.cobounded1 order_trans mult_le_cancel_iff1)
next
case False
then have "n ≤ N" by simp
have "B(2*n)/B n ≤ C1"
unfolding C1_def apply (rule Max_ge) using ‹n ≤ N› by auto
then have "B (2*n) ≤ C1 * B n"
using Bpos[of n] by (simp add: divide_simps)
then show ?thesis
using Bpos[of n] by (meson max.cobounded2 order_trans mult_le_cancel_iff1)
qed
show ?thesis
apply (rule exI[of _ "max ((2 * C0/a)) C1"])
using * by auto
qed
text ‹Finally, we combine the estimates in the two lemmas above to show that $B_n$ grows
at most polynomially.›
(* Combining upper_bound_CD and upper_bound_E: the normalizing sequence B grows
   at most polynomially, i.e., B n ≤ C * n^K for all n > 0. *)
theorem polynomial_growth:
"∃C K. ∀n>0. B n ≤ C * (real n)^K"
proof -
obtain C D where C: "C ≥ 1" "⋀n. B (Suc n) ≤ D ∨ B (Suc n) ≤ C * B n"
using upper_bound_CD by blast
obtain E where E: "⋀n. B (2 * n) ≤ E * B n"
using upper_bound_E by blast
have "E ≥ 1" using E[of 0] Bpos[of 0] by auto
(* Choose k with max C E ≤ 2^k, to express both constants as powers of 2. *)
obtain k::nat where "log 2 (max C E) ≤ k"
using real_arch_simple[of "log 2 (max C E)"] by blast
then have "max C E ≤ 2^k"
by (meson less_log_of_power not_less one_less_numeral_iff semiring_norm(76))
then have "C ≤ 2^k" "E ≤ 2^k"
by auto
define P where "P = max D (B 0)"
have "P > 0" unfolding P_def using Bpos[of 0] by auto
(* Induction on r: every n < 2^r satisfies B n ≤ P * 2^(2*k*r).  The inductive
   step distinguishes n even (use E to halve the index), B n small (bounded by
   D ≤ P), and n odd with B n large (use C to reach the even index below, then E). *)
have Main: "⋀n. n < 2^r ⟹ B n ≤ P * 2^(2 * k * r)" for r
proof (induction r)
case 0
fix n::nat assume "n < 2^0"
then show "B n ≤ P * 2 ^ (2 * k * 0)"
unfolding P_def by auto
next
case (Suc r)
fix n::nat assume "n < 2^(Suc r)"
consider "even n" | "B n ≤ D" | "odd n ∧ B n > D" by linarith
then show "B n ≤ P * 2 ^ (2 * k * Suc r)"
proof (cases)
case 1
(* Even case: n = 2 * m with m < 2^r; apply E and the induction hypothesis. *)
then obtain m where m: "n = 2 * m" by (rule evenE)
have "m < 2^r"
using ‹n < 2^(Suc r)› unfolding m by auto
then have *: "B m ≤ P * 2^(2 * k * r)"
using Suc.IH by auto
have "B n ≤ E * B m"
unfolding m using E by simp
also have "... ≤ 2^k * B m"
apply (rule mult_right_mono[OF _ less_imp_le[OF Bpos[of m]]])
using ‹E ≤ 2^k› by simp
also have "... ≤ 2^k * (P * 2^(2 * k * r))"
apply (rule mult_left_mono[OF *]) by auto
also have "... = P * 2^(2 * k * r + k)"
by (auto simp add: algebra_simps power_add)
also have "... ≤ P * 2^(2 * k * Suc r)"
apply (rule mult_left_mono) using ‹P > 0› by auto
finally show ?thesis by simp
next
case 2
(* Small case: B n ≤ D ≤ P suffices. *)
have "D ≤ P * 1"
unfolding P_def by auto
also have "... ≤ P * 2^(2 * k * Suc r)"
by (rule mult_left_mono[OF _ less_imp_le[OF ‹P > 0›]], auto)
finally show ?thesis using 2 by simp
next
case 3
(* Odd large case: n = 2 * m + 1; step down to 2 * m with C, then to m with E. *)
then obtain m where m: "n = 2 * m + 1"
using oddE by blast
have "m < 2^r"
using ‹n < 2^(Suc r)› unfolding m by auto
then have *: "B m ≤ P * 2^(2 * k * r)"
using Suc.IH by auto
have "B n > D" using 3 by auto
then have "B n ≤ C * B (2 * m)"
unfolding m using C(2)[of "2 * m"] by auto
also have "... ≤ C * (E * B m)"
apply (rule mult_left_mono) using ‹C ≥ 1› E[of m] by auto
also have "... ≤ 2^k * (2^k * B m)"
apply (intro mult_mono) using ‹C ≤ 2^k› ‹C ≥ 1› ‹E ≥ 1› ‹E ≤ 2^k› Bpos[of m] by auto
also have "... ≤ 2^k * (2^k * (P * 2^(2 * k * r)))"
apply (intro mult_left_mono) using * by auto
also have "... = P * 2^(2 * k * Suc r)"
using ‹P > 0› by (simp add: algebra_simps divide_simps mult_2_right power_add)
finally show ?thesis by simp
qed
qed
(* For n > 0, take r = floor (log 2 n): then 2^r ≤ n < 2^(r+1), and the dyadic
   bound above turns into the polynomial bound. *)
have I: "B n ≤ (P * 2^(2 * k)) * n^(2 * k)" if "n > 0" for n
proof -
define r::nat where "r = nat(floor(log 2 (real n)))"
have *: "int r = floor(log 2 (real n))"
unfolding r_def using ‹0 < n› by auto
have I: "2^r ≤ n ∧ n < 2^(r+1)"
using floor_log_nat_eq_powr_iff[OF _ ‹n > 0›, of 2 r] * by auto
then have "B n ≤ P * 2^(2 * k * (r+1))"
using Main[of n "r+1"] by auto
also have "... = (P * 2^(2 * k)) * ((2^r)^(2*k))"
by (simp add: power_add power_mult[symmetric])
also have "... ≤ (P * 2^(2 * k)) * n^(2 * k)"
apply (rule mult_left_mono) using I ‹P > 0› by (auto simp add: power_mono)
finally show ?thesis by simp
qed
show ?thesis
apply (rule exI[of _ "P * 2^(2 * k)"], rule exI[of _ "2 * k"])
using I by auto
qed
end
end